Index: examples/BrainF/BrainF.cpp
===================================================================
--- examples/BrainF/BrainF.cpp
+++ examples/BrainF/BrainF.cpp
@@ -154,7 +154,7 @@
                                 "aberrormsg");
 
   //declare i32 @puts(i8 *)
-  Function *puts_func = cast<Function>(module->
+  auto *puts_func = cast<Function>(module->
     getOrInsertFunction("puts", IntegerType::getInt32Ty(C),
       PointerType::getUnqual(IntegerType::getInt8Ty(C)), NULL));
Index: examples/BrainF/BrainFDriver.cpp
===================================================================
--- examples/BrainF/BrainFDriver.cpp
+++ examples/BrainF/BrainFDriver.cpp
@@ -72,7 +72,7 @@
 //Add main function so can be fully compiled
 void addMainFunction(Module *mod) {
   //define i32 @main(i32 %argc, i8 **%argv)
-  Function *main_func = cast<Function>(mod->
+  auto *main_func = cast<Function>(mod->
     getOrInsertFunction("main", IntegerType::getInt32Ty(mod->getContext()),
       IntegerType::getInt32Ty(mod->getContext()),
       PointerType::getUnqual(PointerType::getUnqual(
Index: examples/Fibonacci/fibonacci.cpp
===================================================================
--- examples/Fibonacci/fibonacci.cpp
+++ examples/Fibonacci/fibonacci.cpp
@@ -52,7 +52,7 @@
 static Function *CreateFibFunction(Module *M, LLVMContext &Context) {
   // Create the fib function and insert it into module M. This function is said
   // to return an int and take an int parameter.
-  Function *FibF =
+  auto *FibF =
       cast<Function>(M->getOrInsertFunction("fib", Type::getInt32Ty(Context),
                                             Type::getInt32Ty(Context),
                                             nullptr));
Index: examples/HowToUseJIT/HowToUseJIT.cpp
===================================================================
--- examples/HowToUseJIT/HowToUseJIT.cpp
+++ examples/HowToUseJIT/HowToUseJIT.cpp
@@ -70,7 +70,7 @@
   // Create the add1 function entry and insert this entry into module M. The
   // function will have a return type of "int" and take an argument of "int".
   // The '0' terminates the list of argument types.
-  Function *Add1F =
+  auto *Add1F =
       cast<Function>(M->getOrInsertFunction("add1", Type::getInt32Ty(Context),
                                             Type::getInt32Ty(Context),
                                             nullptr));
@@ -101,7 +101,7 @@
 
   // Now we're going to create function `foo', which returns an int and takes no
   // arguments.
-  Function *FooF =
+  auto *FooF =
       cast<Function>(M->getOrInsertFunction("foo", Type::getInt32Ty(Context),
                                             nullptr));
Index: examples/ParallelJIT/ParallelJIT.cpp
===================================================================
--- examples/ParallelJIT/ParallelJIT.cpp
+++ examples/ParallelJIT/ParallelJIT.cpp
@@ -51,7 +51,7 @@
   // Create the add1 function entry and insert this entry into module M. The
   // function will have a return type of "int" and take an argument of "int".
   // The '0' terminates the list of argument types.
-  Function *Add1F =
+  auto *Add1F =
       cast<Function>(M->getOrInsertFunction("add1",
                                             Type::getInt32Ty(M->getContext()),
                                             Type::getInt32Ty(M->getContext()),
@@ -82,7 +82,7 @@
 static Function *CreateFibFunction(Module *M) {
   // Create the fib function and insert it into module M. This function is said
   // to return an int and take an int parameter.
-  Function *FibF =
+  auto *FibF =
       cast<Function>(M->getOrInsertFunction("fib",
                                             Type::getInt32Ty(M->getContext()),
                                             Type::getInt32Ty(M->getContext()),
Index: lib/Analysis/AliasAnalysis.cpp
===================================================================
--- lib/Analysis/AliasAnalysis.cpp
+++ lib/Analysis/AliasAnalysis.cpp
@@ -676,7 +676,7 @@
 }
 
 bool llvm::isNoAliasArgument(const Value *V) {
-  if (const Argument *A = dyn_cast<Argument>(V))
+  if (const auto *A = dyn_cast<Argument>(V))
     return A->hasNoAliasAttr();
   return false;
 }
@@ -688,7 +688,7 @@
     return true;
   if (isNoAliasCall(V))
     return true;
-  if (const Argument *A = dyn_cast<Argument>(V))
+  if (const auto *A = dyn_cast<Argument>(V))
     return A->hasNoAliasAttr() || A->hasByValAttr();
   return false;
 }
Index: lib/Analysis/AliasSetTracker.cpp
===================================================================
--- lib/Analysis/AliasSetTracker.cpp
+++ lib/Analysis/AliasSetTracker.cpp
@@ -378,7 +378,7 @@
 
   uint64_t Len;
 
-  if (ConstantInt *C = dyn_cast<ConstantInt>(MSI->getLength()))
+  if (auto *C = dyn_cast<ConstantInt>(MSI->getLength()))
     Len = C->getZExtValue();
   else
     Len = MemoryLocation::UnknownSize;
@@ -394,7 +394,7 @@
   MTI->getAAMetadata(AAInfo);
 
   uint64_t Len;
-  if (ConstantInt *C = dyn_cast<ConstantInt>(MTI->getLength()))
+  if (auto *C = dyn_cast<ConstantInt>(MTI->getLength()))
     Len = C->getZExtValue();
   else
     Len = MemoryLocation::UnknownSize;
@@ -440,15 +440,15 @@
 
 void AliasSetTracker::add(Instruction *I) {
   // Dispatch to one of the other add methods.
-  if (LoadInst *LI = dyn_cast<LoadInst>(I))
+  if (auto *LI = dyn_cast<LoadInst>(I))
     return add(LI);
-  if (StoreInst *SI = dyn_cast<StoreInst>(I))
+  if (auto *SI = dyn_cast<StoreInst>(I))
     return add(SI);
-  if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
+  if (auto *VAAI = dyn_cast<VAArgInst>(I))
     return add(VAAI);
-  if (MemSetInst *MSI = dyn_cast<MemSetInst>(I))
+  if (auto *MSI = dyn_cast<MemSetInst>(I))
     return add(MSI);
-  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I))
+  if (auto *MTI = dyn_cast<MemTransferInst>(I))
     return add(MTI);
   return addUnknown(I);
 }
@@ -491,7 +491,7 @@
 void AliasSetTracker::deleteValue(Value *PtrVal) {
   // If this is a call instruction, remove the callsite from the appropriate
   // AliasSet (if present).
-  if (Instruction *Inst = dyn_cast<Instruction>(PtrVal)) {
+  if (auto *Inst = dyn_cast<Instruction>(PtrVal)) {
     if (Inst->mayReadOrWriteMemory()) {
       // Scan all the alias sets to see if this call site is contained.
       for (iterator I = begin(), E = end(); I != E;) {
Index: lib/Analysis/BasicAliasAnalysis.cpp
===================================================================
--- lib/Analysis/BasicAliasAnalysis.cpp
+++ lib/Analysis/BasicAliasAnalysis.cpp
@@ -82,7 +82,7 @@
   // If this is an argument that corresponds to a byval or noalias argument,
   // then it has not escaped before entering the function. Check if it escapes
   // inside the function.
-  if (const Argument *A = dyn_cast<Argument>(V))
+  if (const auto *A = dyn_cast<Argument>(V))
     if (A->hasByValAttr() || A->hasNoAliasAttr())
       // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
@@ -192,7 +192,7 @@
     return V;
   }
 
-  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
+  if (const auto *Const = dyn_cast<ConstantInt>(V)) {
     // If it's a constant, just convert it to an offset and remove the variable.
     // If we've been called recursively, the Offset bit width will be greater
     // than the constant's (the Offset's always as wide as the outermost call),
@@ -203,8 +203,8 @@
     return V;
   }
 
-  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
-    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
+  if (const auto *BOp = dyn_cast<BinaryOperator>(V)) {
+    if (auto *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
       // If we've been called recursively, then Offset and Scale will be wider
       // than the BOp operands. We'll always zext it here as we'll process sign
@@ -355,10 +355,10 @@
   Decomposed.VarIndices.clear();
   do {
     // See if this is a bitcast or GEP.
-    const Operator *Op = dyn_cast<Operator>(V);
+    const auto *Op = dyn_cast<Operator>(V);
     if (!Op) {
       // The only non-operator case we can handle are GlobalAliases.
-      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+      if (const auto *GA = dyn_cast<GlobalAlias>(V)) {
         if (!GA->isInterposable()) {
           V = GA->getAliasee();
           continue;
@@ -374,7 +374,7 @@
       continue;
     }
 
-    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
+    const auto *GEPOp = dyn_cast<GEPOperator>(Op);
     if (!GEPOp) {
       if (auto CS = ImmutableCallSite(V))
         if (const Value *RV = CS.getReturnedArgOperand()) {
@@ -384,7 +384,7 @@
 
       // If it's not a GEP, hand it off to SimplifyInstruction to see if it
       // can come up with something. This matches what GetUnderlyingObject does.
-      if (const Instruction *I = dyn_cast<Instruction>(V))
+      if (const auto *I = dyn_cast<Instruction>(V))
         // TODO: Get a DominatorTree and AssumptionCache and use them here
         // (these are both now available in this function, but this should be
         // updated when GetUnderlyingObject is updated). TLI should be
@@ -427,7 +427,7 @@
       }
 
       // For an array/pointer, add the element offset, explicitly scaled.
-      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
+      if (const auto *CIdx = dyn_cast<ConstantInt>(Index)) {
         if (CIdx->isZero())
           continue;
         Decomposed.OtherOffset +=
@@ -522,7 +522,7 @@
       continue;
 
     // A global constant counts as local memory for our purposes.
-    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
+    if (const auto *GV = dyn_cast<GlobalVariable>(V)) {
       // Note: this doesn't require GV to be "ODR" because it isn't legal for a
       // global to be marked constant in some modules and non-constant in
       // others. GV may even be a declaration, not a definition.
@@ -534,7 +534,7 @@
     }
 
     // If both select values point to local memory, then so does the select.
-    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
+    if (const auto *SI = dyn_cast<SelectInst>(V)) {
       Worklist.push_back(SI->getTrueValue());
       Worklist.push_back(SI->getFalseValue());
       continue;
@@ -542,7 +542,7 @@
 
     // If all values incoming to a phi node point to local memory, then so does
     // the phi.
-    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
+    if (const auto *PN = dyn_cast<PHINode>(V)) {
       // Don't bother inspecting phi nodes with many operands.
       if (PN->getNumIncomingValues() > MaxLookup) {
         Visited.clear();
@@ -659,7 +659,7 @@
 }
 
 static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
-  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
+  const auto *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
   return II && II->getIntrinsicID() == IID;
 }
@@ -725,7 +725,7 @@
   // the current function not to the current function, and a tail callee
   // may reference them.
   if (isa<AllocaInst>(Object))
-    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
+    if (const auto *CI = dyn_cast<CallInst>(CS.getInstruction()))
       if (CI->isTailCall())
         return MRI_NoModRef;
@@ -878,9 +878,9 @@
       V2Size == MemoryLocation::UnknownSize)
     return MayAlias;
 
-  ConstantInt *C1 =
+  auto *C1 =
       dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
-  ConstantInt *C2 =
+  auto *C2 =
       dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));
 
   // If the last (struct) indices are constants and are equal, the other indices
@@ -910,7 +910,7 @@
   auto *Ty = GetElementPtrInst::getIndexedType(
     GEP1->getSourceElementType(), IntermediateIndices);
-  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);
+  auto *LastIndexedStruct = dyn_cast<StructType>(Ty);
 
   if (isa<SequentialType>(Ty)) {
     // We know that:
@@ -1078,7 +1078,7 @@
   // If we have two gep instructions with must-alias or not-alias'ing base
   // pointers, figure out if the indexes to the GEP tell us anything about the
   // derived pointer.
-  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
+  if (const auto *GEP2 = dyn_cast<GEPOperator>(V2)) {
     // Check for the GEP base being at a negative offset, this time in the other
     // direction.
     if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
@@ -1292,7 +1292,7 @@
                                        const Value *UnderV2) {
   // If the values are Selects with the same condition, we can do a more precise
   // check: just check for aliases between the values on corresponding arms.
-  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
+  if (const auto *SI2 = dyn_cast<SelectInst>(V2))
     if (SI->getCondition() == SI2->getCondition()) {
       AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                                      SI2->getTrueValue(), V2Size, V2AAInfo);
@@ -1331,7 +1331,7 @@
   // If the values are PHIs in the same block, we can do a more precise
   // as well as efficient check: just check for aliases between the values
   // on corresponding edges.
-  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
+  if (const auto *PN2 = dyn_cast<PHINode>(V2))
     if (PN2->getParent() == PN->getParent()) {
       LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                    MemoryLocation(V2, V2Size, V2AAInfo));
@@ -1379,7 +1379,7 @@
       return MayAlias;
 
   if (EnableRecPhiAnalysis)
-    if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
+    if (auto *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
       // Check whether the incoming value is a GEP that advances the pointer
       // result of this PHI node (e.g. in a loop). If this is the case, we
       // would recurse and always get a MayAlias. Handle this case specially
@@ -1466,10 +1466,10 @@
 
   // Null values in the default address space don't point to any object, so they
   // don't alias any other pointer.
-  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
+  if (const auto *CPN = dyn_cast<ConstantPointerNull>(O1))
     if (CPN->getType()->getAddressSpace() == 0)
       return NoAlias;
-  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
+  if (const auto *CPN = dyn_cast<ConstantPointerNull>(O2))
     if (CPN->getType()->getAddressSpace() == 0)
       return NoAlias;
@@ -1536,7 +1536,7 @@
     std::swap(O1, O2);
     std::swap(V1AAInfo, V2AAInfo);
   }
-  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
+  if (const auto *GV1 = dyn_cast<GEPOperator>(V1)) {
     AliasResult Result =
         aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
     if (Result != MayAlias)
@@ -1549,7 +1549,7 @@
     std::swap(V1Size, V2Size);
     std::swap(V1AAInfo, V2AAInfo);
   }
-  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
+  if (const auto *PN = dyn_cast<PHINode>(V1)) {
     AliasResult Result =
         aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
     if (Result != MayAlias)
@@ -1562,7 +1562,7 @@
     std::swap(V1Size, V2Size);
     std::swap(V1AAInfo, V2AAInfo);
   }
-  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
+  if (const auto *S1 = dyn_cast<SelectInst>(V1)) {
     AliasResult Result =
         aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
     if (Result != MayAlias)
@@ -1597,7 +1597,7 @@
   if (V != V2)
     return false;
 
-  const Instruction *Inst = dyn_cast<Instruction>(V);
+  const auto *Inst = dyn_cast<Instruction>(V);
   if (!Inst)
     return true;
Index: lib/Analysis/BranchProbabilityInfo.cpp
===================================================================
--- lib/Analysis/BranchProbabilityInfo.cpp
+++ lib/Analysis/BranchProbabilityInfo.cpp
@@ -272,7 +272,7 @@
   // set of blocks postdominated by a cold call.
   assert(!PostDominatedByColdCall.count(BB));
   for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
-    if (const CallInst *CI = dyn_cast<CallInst>(I))
+    if (const auto *CI = dyn_cast<CallInst>(I))
      if (CI->hasFnAttr(Attribute::Cold)) {
         PostDominatedByColdCall.insert(BB);
         break;
@@ -318,12 +318,12 @@
 // Calculate Edge Weights using "Pointer Heuristics". Predict a comparsion
 // between two pointer or pointer and NULL will fail.
 bool BranchProbabilityInfo::calcPointerHeuristics(const BasicBlock *BB) {
-  const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
+  const auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
   if (!BI || !BI->isConditional())
     return false;
 
   Value *Cond = BI->getCondition();
-  ICmpInst *CI = dyn_cast<ICmpInst>(Cond);
+  auto *CI = dyn_cast<ICmpInst>(Cond);
   if (!CI || !CI->isEquality())
     return false;
@@ -411,25 +411,25 @@
 }
 
 bool BranchProbabilityInfo::calcZeroHeuristics(const BasicBlock *BB) {
-  const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
+  const auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
   if (!BI || !BI->isConditional())
     return false;
 
   Value *Cond = BI->getCondition();
-  ICmpInst *CI = dyn_cast<ICmpInst>(Cond);
+  auto *CI = dyn_cast<ICmpInst>(Cond);
   if (!CI)
     return false;
 
   Value *RHS = CI->getOperand(1);
-  ConstantInt *CV = dyn_cast<ConstantInt>(RHS);
+  auto *CV = dyn_cast<ConstantInt>(RHS);
   if (!CV)
     return false;
 
   // If the LHS is the result of AND'ing a value with a single bit bitmask,
   // we don't have information about probabilities.
-  if (Instruction *LHS = dyn_cast<Instruction>(CI->getOperand(0)))
+  if (auto *LHS = dyn_cast<Instruction>(CI->getOperand(0)))
     if (LHS->getOpcode() == Instruction::And)
-      if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(LHS->getOperand(1)))
+      if (auto *AndRHS = dyn_cast<ConstantInt>(LHS->getOperand(1)))
         if (AndRHS->getUniqueInteger().isPowerOf2())
           return false;
@@ -494,12 +494,12 @@
 }
 
 bool BranchProbabilityInfo::calcFloatingPointHeuristics(const BasicBlock *BB) {
-  const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
+  const auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
   if (!BI || !BI->isConditional())
     return false;
 
   Value *Cond = BI->getCondition();
-  FCmpInst *FCmp = dyn_cast<FCmpInst>(Cond);
+  auto *FCmp = dyn_cast<FCmpInst>(Cond);
   if (!FCmp)
     return false;
@@ -531,7 +531,7 @@
 }
 
 bool BranchProbabilityInfo::calcInvokeHeuristics(const BasicBlock *BB) {
-  const InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator());
+  const auto *II = dyn_cast<InvokeInst>(BB->getTerminator());
   if (!II)
     return false;
Index: lib/Analysis/CGSCCPassManager.cpp
===================================================================
--- lib/Analysis/CGSCCPassManager.cpp
+++ lib/Analysis/CGSCCPassManager.cpp
@@ -265,7 +265,7 @@
   // Now walk all references.
   for (Instruction &I : instructions(F))
     for (Value *Op : I.operand_values())
-      if (Constant *C = dyn_cast<Constant>(Op))
+      if (auto *C = dyn_cast<Constant>(Op))
         if (Visited.insert(C).second)
           Worklist.push_back(C);
Index: lib/Analysis/CaptureTracking.cpp
===================================================================
--- lib/Analysis/CaptureTracking.cpp
+++ lib/Analysis/CaptureTracking.cpp
@@ -116,7 +116,7 @@
     }
 
     bool shouldExplore(const Use *U) override {
-      Instruction *I = cast<Instruction>(U->getUser());
+      auto *I = cast<Instruction>(U->getUser());
       if (BeforeHere == I && !IncludeI)
         return false;
@@ -230,7 +230,7 @@
   while (!Worklist.empty()) {
     const Use *U = Worklist.pop_back_val();
-    Instruction *I = cast<Instruction>(U->getUser());
+    auto *I = cast<Instruction>(U->getUser());
     V = U->get();
 
     switch (I->getOpcode()) {
@@ -329,7 +329,7 @@
       // Don't count comparisons of a no-alias return value against null as
       // captures. This allows us to ignore comparisons of malloc results
       // with null, for example.
-      if (ConstantPointerNull *CPN =
+      if (auto *CPN =
               dyn_cast<ConstantPointerNull>(I->getOperand(1)))
         if (CPN->getType()->getAddressSpace() == 0)
           if (isNoAliasCall(V->stripPointerCasts()))
Index: lib/Analysis/CodeMetrics.cpp
===================================================================
--- lib/Analysis/CodeMetrics.cpp
+++ lib/Analysis/CodeMetrics.cpp
@@ -31,7 +31,7 @@
 appendSpeculatableOperands(const Value *V,
                            SmallPtrSetImpl<const Value *> &Visited,
                            SmallVectorImpl<const Value *> &Worklist) {
-  const User *U = dyn_cast<User>(V);
+  const auto *U = dyn_cast<User>(V);
   if (!U)
     return;
@@ -79,7 +79,7 @@
   for (auto &AssumeVH : AC->assumptions()) {
     if (!AssumeVH)
       continue;
-    Instruction *I = cast<Instruction>(AssumeVH);
+    auto *I = cast<Instruction>(AssumeVH);
 
     // Filter out call sites outside of the loop so we don't do a function's
     // worth of work for each of its loops (and, in the common case, ephemeral
@@ -103,7 +103,7 @@
   for (auto &AssumeVH : AC->assumptions()) {
     if (!AssumeVH)
       continue;
-    Instruction *I = cast<Instruction>(AssumeVH);
+    auto *I = cast<Instruction>(AssumeVH);
     assert(I->getParent()->getParent() == F &&
            "Found assumption for the wrong function!");
@@ -154,7 +154,7 @@
       }
     }
 
-    if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
+    if (const auto *AI = dyn_cast<AllocaInst>(&I)) {
       if (!AI->isStaticAlloca())
         this->usesDynamicAlloca = true;
     }
@@ -165,14 +165,14 @@
     if (I.getType()->isTokenTy() && I.isUsedOutsideOfBlock(BB))
       notDuplicatable = true;
 
-    if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
+    if (const auto *CI = dyn_cast<CallInst>(&I)) {
       if (CI->cannotDuplicate())
         notDuplicatable = true;
       if (CI->isConvergent())
         convergent = true;
     }
 
-    if (const InvokeInst *InvI = dyn_cast<InvokeInst>(&I))
+    if (const auto *InvI = dyn_cast<InvokeInst>(&I))
       if (InvI->cannotDuplicate())
         notDuplicatable = true;
Index: lib/Analysis/ConstantFolding.cpp
===================================================================
--- lib/Analysis/ConstantFolding.cpp
+++ lib/Analysis/ConstantFolding.cpp
@@ -1754,7 +1754,7 @@
   case Intrinsic::x86_sse_cvtss2si64:
   case Intrinsic::x86_sse2_cvtsd2si:
   case Intrinsic::x86_sse2_cvtsd2si64:
-    if (ConstantFP *FPOp =
+    if (auto *FPOp =
             dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
       return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/false, Ty);
@@ -1762,7 +1762,7 @@
   case Intrinsic::x86_sse_cvttss2si64:
   case Intrinsic::x86_sse2_cvttsd2si:
   case Intrinsic::x86_sse2_cvttsd2si64:
-    if (ConstantFP *FPOp =
+    if (auto *FPOp =
            dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
       return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/true, Ty);
@@ -2018,7 +2018,7 @@
     return false;
 
   if (CS.getNumArgOperands() == 1) {
-    if (ConstantFP *OpC = dyn_cast<ConstantFP>(CS.getArgOperand(0))) {
+    if (auto *OpC = dyn_cast<ConstantFP>(CS.getArgOperand(0))) {
       const APFloat &Op = OpC->getValueAPF();
       switch (Func) {
       case LibFunc::logl:
@@ -2117,8 +2117,8 @@
   }
 
   if (CS.getNumArgOperands() == 2) {
-    ConstantFP *Op0C = dyn_cast<ConstantFP>(CS.getArgOperand(0));
-    ConstantFP *Op1C = dyn_cast<ConstantFP>(CS.getArgOperand(1));
+    auto *Op0C = dyn_cast<ConstantFP>(CS.getArgOperand(0));
+    auto *Op1C = dyn_cast<ConstantFP>(CS.getArgOperand(1));
     if (Op0C && Op1C) {
       const APFloat &Op0 = Op0C->getValueAPF();
       const APFloat &Op1 = Op1C->getValueAPF();
Index: lib/Analysis/CostModel.cpp
===================================================================
--- lib/Analysis/CostModel.cpp
+++ lib/Analysis/CostModel.cpp
@@ -179,10 +179,10 @@
   Value *L = BinOp->getOperand(0);
   Value *R = BinOp->getOperand(1);
 
-  ShuffleVectorInst *LS = dyn_cast<ShuffleVectorInst>(L);
+  auto *LS = dyn_cast<ShuffleVectorInst>(L);
   if (!LS && Level)
     return false;
-  ShuffleVectorInst *RS = dyn_cast<ShuffleVectorInst>(R);
+  auto *RS = dyn_cast<ShuffleVectorInst>(R);
   if (!RS && Level)
     return false;
@@ -249,14 +249,14 @@
     return false;
   // Need to extract the first element.
-  ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
+  auto *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
   unsigned Idx = ~0u;
   if (CI)
     Idx = CI->getZExtValue();
   if (Idx != 0)
     return false;
 
-  BinaryOperator *RdxStart = dyn_cast<BinaryOperator>(ReduxRoot->getOperand(0));
+  auto *RdxStart = dyn_cast<BinaryOperator>(ReduxRoot->getOperand(0));
   if (!RdxStart)
     return false;
@@ -312,14 +312,14 @@
     return false;
 
   // Need to extract the first element.
-  ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
+  auto *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
   unsigned Idx = ~0u;
   if (CI)
     Idx = CI->getZExtValue();
   if (Idx != 0)
     return false;
 
-  BinaryOperator *RdxStart = dyn_cast<BinaryOperator>(ReduxRoot->getOperand(0));
+  auto *RdxStart = dyn_cast<BinaryOperator>(ReduxRoot->getOperand(0));
   if (!RdxStart)
     return false;
 
   unsigned RdxOpcode = RdxStart->getOpcode();
@@ -421,7 +421,7 @@
                                       Op2VK);
   }
   case Instruction::Select: {
-    const SelectInst *SI = cast<SelectInst>(I);
+    const auto *SI = cast<SelectInst>(I);
     Type *CondTy = SI->getCondition()->getType();
     return TTI->getCmpSelInstrCost(I->getOpcode(), I->getType(), CondTy);
   }
@@ -431,14 +431,14 @@
     return TTI->getCmpSelInstrCost(I->getOpcode(), ValTy);
   }
   case Instruction::Store: {
-    const StoreInst *SI = cast<StoreInst>(I);
+    const auto *SI = cast<StoreInst>(I);
     Type *ValTy = SI->getValueOperand()->getType();
     return TTI->getMemoryOpCost(I->getOpcode(), ValTy,
                                 SI->getAlignment(),
                                 SI->getPointerAddressSpace());
   }
   case Instruction::Load: {
-    const LoadInst *LI = cast<LoadInst>(I);
+    const auto *LI = cast<LoadInst>(I);
     return TTI->getMemoryOpCost(I->getOpcode(), I->getType(),
                                 LI->getAlignment(),
                                 LI->getPointerAddressSpace());
@@ -460,8 +460,8 @@
     return TTI->getCastInstrCost(I->getOpcode(), I->getType(), SrcTy);
   }
   case Instruction::ExtractElement: {
-    const ExtractElementInst * EEI = cast<ExtractElementInst>(I);
-    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
+    const auto * EEI = cast<ExtractElementInst>(I);
+    auto *CI = dyn_cast<ConstantInt>(I->getOperand(1));
     unsigned Idx = -1;
     if (CI)
       Idx = CI->getZExtValue();
@@ -480,8 +480,8 @@
                                    EEI->getOperand(0)->getType(), Idx);
   }
   case Instruction::InsertElement: {
-    const InsertElementInst * IE = cast<InsertElementInst>(I);
-    ConstantInt *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
+    const auto * IE = cast<InsertElementInst>(I);
+    auto *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
     unsigned Idx = -1;
     if (CI)
       Idx = CI->getZExtValue();
@@ -489,7 +489,7 @@
                                    IE->getType(), Idx);
   }
   case Instruction::ShuffleVector: {
-    const ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
+    const auto *Shuffle = cast<ShuffleVectorInst>(I);
     Type *VecTypOp0 = Shuffle->getOperand(0)->getType();
     unsigned NumVecElems = VecTypOp0->getVectorNumElements();
     SmallVector<int, 16> Mask = Shuffle->getShuffleMask();
@@ -506,7 +506,7 @@
     return -1;
   }
   case Instruction::Call:
-    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+    if (const auto *II = dyn_cast<IntrinsicInst>(I)) {
       SmallVector<Value *, 4> Args;
       for (unsigned J = 0, JE = II->getNumArgOperands(); J != JE; ++J)
         Args.push_back(II->getArgOperand(J));
Index: lib/Analysis/Delinearization.cpp
===================================================================
--- lib/Analysis/Delinearization.cpp
+++ lib/Analysis/Delinearization.cpp
@@ -70,11 +70,11 @@
 }
 
 static Value *getPointerOperand(Instruction &Inst) {
-  if (LoadInst *Load = dyn_cast<LoadInst>(&Inst))
+  if (auto *Load = dyn_cast<LoadInst>(&Inst))
     return Load->getPointerOperand();
-  else if (StoreInst *Store = dyn_cast<StoreInst>(&Inst))
+  else if (auto *Store = dyn_cast<StoreInst>(&Inst))
     return Store->getPointerOperand();
-  else if (GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(&Inst))
+  else if (auto *Gep = dyn_cast<GetElementPtrInst>(&Inst))
     return Gep->getPointerOperand();
   return nullptr;
 }
@@ -95,7 +95,7 @@
   for (Loop *L = LI->getLoopFor(BB); L != nullptr; L = L->getParentLoop()) {
     const SCEV *AccessFn = SE->getSCEVAtScope(getPointerOperand(*Inst), L);
 
-    const SCEVUnknown *BasePointer =
+    const auto *BasePointer =
         dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFn));
     // Do not delinearize if we cannot find the base pointer.
     if (!BasePointer)
Index: lib/Analysis/DemandedBits.cpp
===================================================================
--- lib/Analysis/DemandedBits.cpp
+++ lib/Analysis/DemandedBits.cpp
@@ -102,7 +102,7 @@
   default: break;
   case Instruction::Call:
   case Instruction::Invoke:
-    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(UserI))
+    if (const auto *II = dyn_cast<IntrinsicInst>(UserI))
       switch (II->getIntrinsicID()) {
       default: break;
       case Intrinsic::bswap:
@@ -142,14 +142,14 @@
     break;
   case Instruction::Shl:
     if (OperandNo == 0)
-      if (ConstantInt *CI =
+      if (auto *CI =
             dyn_cast<ConstantInt>(UserI->getOperand(1))) {
         uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1);
         AB = AOut.lshr(ShiftAmt);
 
         // If the shift is nuw/nsw, then the high bits are not dead
         // (because we've promised that they *must* be zero).
-        const ShlOperator *S = cast<ShlOperator>(UserI);
+        const auto *S = cast<ShlOperator>(UserI);
         if (S->hasNoSignedWrap())
           AB |= APInt::getHighBitsSet(BitWidth, ShiftAmt+1);
         else if (S->hasNoUnsignedWrap())
@@ -158,7 +158,7 @@
     break;
   case Instruction::LShr:
     if (OperandNo == 0)
-      if (ConstantInt *CI =
+      if (auto *CI =
            dyn_cast<ConstantInt>(UserI->getOperand(1))) {
         uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1);
         AB = AOut.shl(ShiftAmt);
@@ -171,7 +171,7 @@
     break;
   case Instruction::AShr:
     if (OperandNo == 0)
-      if (ConstantInt *CI =
+      if (auto *CI =
            dyn_cast<ConstantInt>(UserI->getOperand(1))) {
         uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1);
         AB = AOut.shl(ShiftAmt);
@@ -279,7 +279,7 @@
     // bits and add the instruction to the work list. For other instructions
     // add their operands to the work list (for integer values operands, mark
     // all bits as live).
-    if (IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
+    if (auto *IT = dyn_cast<IntegerType>(I.getType())) {
       if (AliveBits.try_emplace(&I, IT->getBitWidth(), 0).second)
         Worklist.push_back(&I);
 
@@ -288,8 +288,8 @@
 
     // Non-integer-typed instructions...
     for (Use &OI : I.operands()) {
-      if (Instruction *J = dyn_cast<Instruction>(OI)) {
-        if (IntegerType *IT = dyn_cast<IntegerType>(J->getType()))
+      if (auto *J = dyn_cast<Instruction>(OI)) {
+        if (auto *IT = dyn_cast<IntegerType>(J->getType()))
           AliveBits[J] = APInt::getAllOnesValue(IT->getBitWidth());
         Worklist.push_back(J);
       }
@@ -320,8 +320,8 @@
     // existing set, if any, and if that changes the set of alive bits, the
     // operand is added to the work-list.
     for (Use &OI : UserI->operands()) {
-      if (Instruction *I = dyn_cast<Instruction>(OI)) {
-        if (IntegerType *IT = dyn_cast<IntegerType>(I->getType())) {
+      if (auto *I = dyn_cast<Instruction>(OI)) {
+        if (auto *IT = dyn_cast<IntegerType>(I->getType())) {
           unsigned BitWidth = IT->getBitWidth();
           APInt AB = APInt::getAllOnesValue(BitWidth);
           if (UserI->getType()->isIntegerTy() && !AOut &&
Index: lib/Analysis/DependenceAnalysis.cpp
===================================================================
--- lib/Analysis/DependenceAnalysis.cpp
+++ lib/Analysis/DependenceAnalysis.cpp
@@ -485,13 +485,13 @@
   const SCEV *C2A1 = SE->getMulExpr(Y->getC(), X->getA());
   const SCEV *A1B2 = SE->getMulExpr(X->getA(), Y->getB());
   const SCEV *A2B1 = SE->getMulExpr(Y->getA(), X->getB());
-  const SCEVConstant *C1A2_C2A1 =
+  const auto *C1A2_C2A1 =
       dyn_cast<SCEVConstant>(SE->getMinusSCEV(C1A2, C2A1));
-  const SCEVConstant *C1B2_C2B1 =
+  const auto *C1B2_C2B1 =
       dyn_cast<SCEVConstant>(SE->getMinusSCEV(C1B2, C2B1));
-  const SCEVConstant *A1B2_A2B1 =
+  const auto *A1B2_A2B1 =
      dyn_cast<SCEVConstant>(SE->getMinusSCEV(A1B2, A2B1));
-  const SCEVConstant *A2B1_A1B2 =
+  const auto *A2B1_A1B2 =
      dyn_cast<SCEVConstant>(SE->getMinusSCEV(A2B1, A1B2));
   if (!C1B2_C2B1 || !C1A2_C2A1 ||
       !A1B2_A2B1 || !A2B1_A1B2)
@@ -635,9 +635,9 @@
 // operations have properties which this analysis does not understand.
 static
 bool isLoadOrStore(const Instruction *I) {
-  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
+  if (const auto *LI = dyn_cast<LoadInst>(I))
     return LI->isUnordered();
-  else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
+  else if (const auto *SI = dyn_cast<StoreInst>(I))
     return SI->isUnordered();
   return false;
 }
@@ -645,9 +645,9 @@
 
 static
 Value *getPointerOperand(Instruction *I) {
-  if (LoadInst *LI = dyn_cast<LoadInst>(I))
+  if (auto *LI = dyn_cast<LoadInst>(I))
     return LI->getPointerOperand();
-  if (StoreInst *SI = dyn_cast<StoreInst>(I))
+  if (auto *SI = dyn_cast<StoreInst>(I))
     return SI->getPointerOperand();
   llvm_unreachable("Value is not load or store instruction");
   return nullptr;
@@ -784,8 +784,8 @@
   for (Subscript *Pair : Pairs) {
     const SCEV *Src = Pair->Src;
     const SCEV *Dst = Pair->Dst;
-    IntegerType *SrcTy = dyn_cast<IntegerType>(Src->getType());
-    IntegerType *DstTy = dyn_cast<IntegerType>(Dst->getType());
+    auto *SrcTy = dyn_cast<IntegerType>(Src->getType());
+    auto *DstTy = dyn_cast<IntegerType>(Dst->getType());
     if (SrcTy == nullptr || DstTy == nullptr) {
       assert(SrcTy == DstTy && "This function only unify integer types and "
              "expect Src and Dst share the same type "
@@ -809,8 +809,8 @@
   for (Subscript *Pair : Pairs) {
     const SCEV *Src = Pair->Src;
     const SCEV *Dst = Pair->Dst;
-    IntegerType *SrcTy = dyn_cast<IntegerType>(Src->getType());
-    IntegerType *DstTy = dyn_cast<IntegerType>(Dst->getType());
+    auto *SrcTy = dyn_cast<IntegerType>(Src->getType());
+    auto *DstTy = dyn_cast<IntegerType>(Dst->getType());
     if (SrcTy == nullptr || DstTy == nullptr) {
       assert(SrcTy == DstTy && "This function only unify integer types and "
             "expect Src and Dst share the same type "
@@ -836,8 +836,8 @@
     const SCEV *Dst = Pair->Dst;
     if ((isa<SCEVZeroExtendExpr>(Src) && isa<SCEVZeroExtendExpr>(Dst)) ||
         (isa<SCEVSignExtendExpr>(Src) && isa<SCEVSignExtendExpr>(Dst))) {
-      const SCEVCastExpr *SrcCast = cast<SCEVCastExpr>(Src);
-      const SCEVCastExpr *DstCast = cast<SCEVCastExpr>(Dst);
+      const auto *SrcCast = cast<SCEVCastExpr>(Src);
+      const auto *DstCast = cast<SCEVCastExpr>(Dst);
       const SCEV *SrcCastOp = SrcCast->getOperand();
       const SCEV *DstCastOp = DstCast->getOperand();
       if (SrcCastOp->getType() == DstCastOp->getType()) {
@@ -852,7 +852,7 @@
 
 // Collect any loops mentioned in the set of "Loops".
 bool DependenceInfo::checkSrcSubscript(const SCEV *Src, const Loop *LoopNest,
                                        SmallBitVector &Loops) {
-  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Src);
+  const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Src);
   if (!AddRec)
     return isLoopInvariant(Src, LoopNest);
   const SCEV *Start = AddRec->getStart();
@@ -877,7 +877,7 @@
 // Collect any loops mentioned in the set of "Loops".
 bool DependenceInfo::checkDstSubscript(const SCEV *Dst, const Loop *LoopNest,
                                        SmallBitVector &Loops) {
-  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Dst);
+  const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Dst);
   if (!AddRec)
     return isLoopInvariant(Dst, LoopNest);
   const SCEV *Start = AddRec->getStart();
@@ -943,8 +943,8 @@
        isa<SCEVSignExtendExpr>(Y)) ||
       (isa<SCEVZeroExtendExpr>(X) &&
        isa<SCEVZeroExtendExpr>(Y))) {
-    const SCEVCastExpr *CX = cast<SCEVCastExpr>(X);
-    const SCEVCastExpr *CY = cast<SCEVCastExpr>(Y);
+    const auto *CX = cast<SCEVCastExpr>(X);
+    const auto *CY = cast<SCEVCastExpr>(Y);
     const SCEV *Xop = CX->getOperand();
     const SCEV *Yop = CY->getOperand();
     if (Xop->getType() == Yop->getType()) {
@@ -1225,7 +1225,7 @@
     Result.DV[Level].Distance = Delta; // = 0
     return false;
   }
-  const SCEVConstant *ConstCoeff = dyn_cast<SCEVConstant>(Coeff);
+  const auto *ConstCoeff = dyn_cast<SCEVConstant>(Coeff);
   if (!ConstCoeff)
     return false;
@@ -1244,7 +1244,7 @@
       SE->getMulExpr(SE->getConstant(Delta->getType(), 2), ConstCoeff));
   DEBUG(dbgs() << "\t    Split iter = " << *SplitIter << "\n");
 
-  const SCEVConstant *ConstDelta = dyn_cast<SCEVConstant>(Delta);
+  const auto *ConstDelta = dyn_cast<SCEVConstant>(Delta);
   if (!ConstDelta)
     return false;
@@ -1428,9 +1428,9 @@
   DEBUG(dbgs() << "\t    Delta = " << *Delta << "\n");
   NewConstraint.setLine(SrcCoeff, SE->getNegativeSCEV(DstCoeff), Delta,
                         CurLoop);
-  const SCEVConstant *ConstDelta = dyn_cast<SCEVConstant>(Delta);
-  const SCEVConstant *ConstSrcCoeff = dyn_cast<SCEVConstant>(SrcCoeff);
-  const SCEVConstant *ConstDstCoeff = dyn_cast<SCEVConstant>(DstCoeff);
+  const auto *ConstDelta = dyn_cast<SCEVConstant>(Delta);
+  const auto *ConstSrcCoeff = dyn_cast<SCEVConstant>(SrcCoeff);
+  const auto *ConstDstCoeff = dyn_cast<SCEVConstant>(DstCoeff);
   if (!ConstDelta || !ConstSrcCoeff || !ConstDstCoeff)
     return false;
@@ -1648,7 +1648,7 @@
     }
     return false; // dependences caused by first iteration
   }
-  const SCEVConstant *ConstCoeff = dyn_cast<SCEVConstant>(DstCoeff);
+  const auto *ConstCoeff = dyn_cast<SCEVConstant>(DstCoeff);
   if (!ConstCoeff)
     return false;
   const SCEV *AbsCoeff =
@@ -1757,7 +1757,7 @@
     }
     return false; // dependences caused by first iteration
   }
-  const SCEVConstant *ConstCoeff = dyn_cast<SCEVConstant>(SrcCoeff);
+  const auto *ConstCoeff = dyn_cast<SCEVConstant>(SrcCoeff);
   if (!ConstCoeff)
     return false;
   const SCEV *AbsCoeff =
@@ -1827,9 +1827,9 @@
   Result.Consistent = false;
   const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
   DEBUG(dbgs() << "\t    Delta = " << *Delta << "\n");
-  const SCEVConstant *ConstDelta = dyn_cast<SCEVConstant>(Delta);
-  const SCEVConstant *ConstSrcCoeff = dyn_cast<SCEVConstant>(SrcCoeff);
-  const SCEVConstant *ConstDstCoeff = dyn_cast<SCEVConstant>(DstCoeff);
+  const auto *ConstDelta = dyn_cast<SCEVConstant>(Delta);
+  const auto *ConstSrcCoeff = dyn_cast<SCEVConstant>(SrcCoeff);
+  const auto *ConstDstCoeff = dyn_cast<SCEVConstant>(DstCoeff);
   if (!ConstDelta || !ConstSrcCoeff || !ConstDstCoeff)
     return false;
@@ -2075,8 +2075,8 @@
                                 const SCEV *&SplitIter) const {
   DEBUG(dbgs() << "    src = " << *Src << "\n");
   DEBUG(dbgs() << "    dst = " << *Dst << "\n");
-  const SCEVAddRecExpr *SrcAddRec = dyn_cast<SCEVAddRecExpr>(Src);
-  const SCEVAddRecExpr *DstAddRec = dyn_cast<SCEVAddRecExpr>(Dst);
+  const auto *SrcAddRec = dyn_cast<SCEVAddRecExpr>(Src);
+  const auto *DstAddRec = dyn_cast<SCEVAddRecExpr>(Dst);
   if (SrcAddRec && DstAddRec) {
     const SCEV *SrcConst = SrcAddRec->getStart();
     const SCEV *DstConst = DstAddRec->getStart();
@@ -2152,8 +2152,8 @@
   DEBUG(dbgs() << "    src = " << *Src << "\n");
   DEBUG(dbgs() << "    dst = " << *Dst << "\n");
-  const SCEVAddRecExpr *SrcAddRec =
-      dyn_cast<SCEVAddRecExpr>(Src);
-  const SCEVAddRecExpr *DstAddRec = dyn_cast<SCEVAddRecExpr>(Dst);
+  const auto *SrcAddRec = dyn_cast<SCEVAddRecExpr>(Src);
+  const auto *DstAddRec = dyn_cast<SCEVAddRecExpr>(Dst);
   if (SrcAddRec && DstAddRec) {
     SrcConst = SrcAddRec->getStart();
     SrcCoeff = SrcAddRec->getStepRecurrence(*SE);
@@ -2163,7 +2163,7 @@
     DstLoop = DstAddRec->getLoop();
   }
   else if (SrcAddRec) {
-    if (const SCEVAddRecExpr *tmpAddRec =
+    if (const auto *tmpAddRec =
           dyn_cast<SCEVAddRecExpr>(SrcAddRec->getStart())) {
       SrcConst = tmpAddRec->getStart();
       SrcCoeff = tmpAddRec->getStepRecurrence(*SE);
@@ -2176,7 +2176,7 @@
       llvm_unreachable("RDIV reached by surprising SCEVs");
   }
   else if (DstAddRec) {
-    if (const SCEVAddRecExpr *tmpAddRec =
+    if (const auto *tmpAddRec =
           dyn_cast<SCEVAddRecExpr>(DstAddRec->getStart())) {
       DstConst = tmpAddRec->getStart();
       DstCoeff = tmpAddRec->getStepRecurrence(*SE);
@@ -2258,7 +2258,7 @@
   // Because we're looking for the constant at the end of the chain,
   // we can't quit the loop just because the GCD == 1.
   const SCEV *Coefficients = Src;
-  while (const SCEVAddRecExpr *AddRec =
+  while (const auto *AddRec =
          dyn_cast<SCEVAddRecExpr>(Coefficients)) {
     const SCEV *Coeff = AddRec->getStepRecurrence(*SE);
     // If the coefficient is the product of a constant and other stuff,
@@ -2277,7 +2277,7 @@
   // Because we're looking for the constant at the end of the chain,
   // we can't quit the loop just because the GCD == 1.
   Coefficients = Dst;
-  while (const SCEVAddRecExpr *AddRec =
+  while (const auto *AddRec =
         dyn_cast<SCEVAddRecExpr>(Coefficients)) {
     const SCEV *Coeff = AddRec->getStepRecurrence(*SE);
     // If the coefficient is the product of a constant and other stuff,
@@ -2294,8 +2294,8 @@
   APInt ExtraGCD = APInt::getNullValue(BitWidth);
   const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
   DEBUG(dbgs() << "    Delta = " << *Delta << "\n");
-  const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Delta);
-  if (const SCEVAddExpr *Sum = dyn_cast<SCEVAddExpr>(Delta)) {
+  const auto *Constant = dyn_cast<SCEVConstant>(Delta);
+  if (const auto *Sum = dyn_cast<SCEVAddExpr>(Delta)) {
     // If Delta is a sum of products, we may be able to make further progress.
     for (unsigned Op = 0, Ops = Sum->getNumOperands(); Op < Ops; Op++) {
       const SCEV *Operand = Sum->getOperand(Op);
@@ -2303,7 +2303,7 @@
         assert(!Constant && "Surprised to find multiple constants");
         Constant = cast<SCEVConstant>(Operand);
       }
-      else if (const SCEVMulExpr *Product = dyn_cast<SCEVMulExpr>(Operand)) {
+      else if (const auto *Product = dyn_cast<SCEVMulExpr>(Operand)) {
         // Search for constant operand to participate in GCD;
         // If none found; return false.
         const SCEVConstant *ConstOp = getConstantPart(Product);
@@ -2347,7 +2347,7 @@
 
   bool Improved = false;
   Coefficients = Src;
-  while (const SCEVAddRecExpr *AddRec =
+  while (const auto *AddRec =
         dyn_cast<SCEVAddRecExpr>(Coefficients)) {
     Coefficients = AddRec->getStart();
     const Loop *CurLoop = AddRec->getLoop();
@@ -2834,7 +2834,7 @@
     CI[K].NegPart = Zero;
     CI[K].Iterations = nullptr;
   }
-  while (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Subscript)) {
+  while (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Subscript)) {
     const Loop *L = AddRec->getLoop();
     unsigned K = SrcFlag ? mapSrcLoop(L) : mapDstLoop(L);
     CI[K].Coeff = AddRec->getStepRecurrence(*SE);
@@ -2908,7 +2908,7 @@
 // corresponding to the j loop would yield b.
 const SCEV *DependenceInfo::findCoefficient(const SCEV *Expr,
                                             const Loop *TargetLoop) const {
-  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Expr);
+  const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Expr);
   if (!AddRec)
     return SE->getZero(Expr->getType());
   if (AddRec->getLoop() == TargetLoop)
@@ -2924,7 +2924,7 @@
 // corresponding to the j loop would yield a*i + c*k.
 const SCEV *DependenceInfo::zeroCoefficient(const SCEV *Expr,
                                             const Loop *TargetLoop) const {
-  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Expr);
+  const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Expr);
   if (!AddRec)
     return Expr; // ignore
   if (AddRec->getLoop() == TargetLoop)
@@ -2944,7 +2944,7 @@
 const SCEV *DependenceInfo::addToCoefficient(const SCEV *Expr,
                                              const Loop *TargetLoop,
                                              const SCEV *Value) const {
-  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Expr);
+  const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Expr);
   if (!AddRec) // create a new addRec
     return SE->getAddRecExpr(Expr,
                              Value,
@@ -3039,8 +3039,8 @@
   DEBUG(dbgs() << "\t\tSrc = " << *Src << "\n");
   DEBUG(dbgs() << "\t\tDst = " << *Dst << "\n");
   if (A->isZero()) {
-    const SCEVConstant *Bconst = dyn_cast<SCEVConstant>(B);
-    const SCEVConstant *Cconst = dyn_cast<SCEVConstant>(C);
+    const auto *Bconst = dyn_cast<SCEVConstant>(B);
+    const auto *Cconst = dyn_cast<SCEVConstant>(C);
     if (!Bconst || !Cconst)
       return false;
     APInt Beta = Bconst->getAPInt();
     APInt Charlie = Cconst->getAPInt();
@@ -3054,8 +3054,8 @@
     Consistent = false;
   }
   else if (B->isZero()) {
-    const SCEVConstant *Aconst = dyn_cast<SCEVConstant>(A);
-    const SCEVConstant *Cconst = dyn_cast<SCEVConstant>(C);
+    const auto *Aconst = dyn_cast<SCEVConstant>(A);
+    const auto *Cconst = dyn_cast<SCEVConstant>(C);
     if (!Aconst || !Cconst)
       return false;
     APInt Alpha = Aconst->getAPInt();
     APInt Charlie = Cconst->getAPInt();
@@ -3068,8 +3068,8 @@
     Consistent = false;
   }
   else if (isKnownPredicate(CmpInst::ICMP_EQ, A, B)) {
-    const SCEVConstant *Aconst = dyn_cast<SCEVConstant>(A);
-    const SCEVConstant *Cconst = dyn_cast<SCEVConstant>(C);
+    const auto *Aconst = dyn_cast<SCEVConstant>(A);
+    const auto *Cconst = dyn_cast<SCEVConstant>(C);
     if (!Aconst || !Cconst)
       return false;
     APInt Alpha = Aconst->getAPInt();
     APInt Charlie = Cconst->getAPInt();
@@ -3188,9 +3188,9 @@
   const SCEV *DstAccessFn = SE->getSCEVAtScope(DstPtr, DstLoop);
 
-  const SCEVUnknown *SrcBase =
+  const auto *SrcBase =
       dyn_cast<SCEVUnknown>(SE->getPointerBase(SrcAccessFn));
-  const SCEVUnknown *DstBase =
+  const auto *DstBase =
      dyn_cast<SCEVUnknown>(SE->getPointerBase(DstAccessFn));
 
   if (!SrcBase || !DstBase || SrcBase != DstBase)
@@ -3203,8 +3203,8 @@
   const SCEV *SrcSCEV = SE->getMinusSCEV(SrcAccessFn, SrcBase);
   const SCEV *DstSCEV = SE->getMinusSCEV(DstAccessFn, DstBase);
-  const SCEVAddRecExpr *SrcAR = dyn_cast<SCEVAddRecExpr>(SrcSCEV);
-  const SCEVAddRecExpr *DstAR = dyn_cast<SCEVAddRecExpr>(DstSCEV);
+  const auto *SrcAR = dyn_cast<SCEVAddRecExpr>(SrcSCEV);
+  const auto *DstAR = dyn_cast<SCEVAddRecExpr>(DstSCEV);
   if (!SrcAR || !DstAR || !SrcAR->isAffine() || !DstAR->isAffine())
     return false;
@@ -3330,8 +3330,8 @@
 
   // See if there are GEPs we can use.
   bool UsefulGEP = false;
-  GEPOperator *SrcGEP = dyn_cast<GEPOperator>(SrcPtr);
-  GEPOperator *DstGEP = dyn_cast<GEPOperator>(DstPtr);
+  auto *SrcGEP = dyn_cast<GEPOperator>(SrcPtr);
+  auto *DstGEP = dyn_cast<GEPOperator>(DstPtr);
   if (SrcGEP && DstGEP &&
       SrcGEP->getPointerOperandType() == DstGEP->getPointerOperandType()) {
     const SCEV *SrcPtrSCEV = SE->getSCEV(SrcGEP->getPointerOperand());
@@ -3764,8 +3764,8 @@
 
   // See if there are GEPs we can use.
   bool UsefulGEP = false;
-  GEPOperator *SrcGEP = dyn_cast<GEPOperator>(SrcPtr);
-  GEPOperator *DstGEP = dyn_cast<GEPOperator>(DstPtr);
+  auto *SrcGEP = dyn_cast<GEPOperator>(SrcPtr);
+  auto *DstGEP = dyn_cast<GEPOperator>(DstPtr);
   if (SrcGEP && DstGEP &&
       SrcGEP->getPointerOperandType() == DstGEP->getPointerOperandType()) {
     const SCEV *SrcPtrSCEV = SE->getSCEV(SrcGEP->getPointerOperand());
Index: lib/Analysis/DivergenceAnalysis.cpp
===================================================================
--- lib/Analysis/DivergenceAnalysis.cpp
+++ lib/Analysis/DivergenceAnalysis.cpp
@@ -199,7 +199,7 @@
 void DivergencePropagator::findUsersOutsideInfluenceRegion(
     Instruction &I, const DenseSet<BasicBlock *> &InfluenceRegion) {
   for (User *U : I.users()) {
-    Instruction *UserInst = cast<Instruction>(U);
+    auto *UserInst = cast<Instruction>(U);
     if (!InfluenceRegion.count(UserInst->getParent())) {
       if (DV.insert(UserInst).second)
         Worklist.push_back(UserInst);
@@ -240,7 +240,7 @@
 void DivergencePropagator::exploreDataDependency(Value *V) {
   // Follow def-use chains of V.
   for (User *U : V->users()) {
-    Instruction *UserInst = cast<Instruction>(U);
+    auto *UserInst = cast<Instruction>(U);
     if (DV.insert(UserInst).second)
       Worklist.push_back(UserInst);
   }
@@ -251,7 +251,7 @@
   while (!Worklist.empty()) {
     Value *V = Worklist.back();
     Worklist.pop_back();
-    if (TerminatorInst *TI = dyn_cast<TerminatorInst>(V)) {
+    if (auto *TI = dyn_cast<TerminatorInst>(V)) {
       // Terminators with less than two successors won't introduce sync
       // dependency. Ignore them.
       if (TI->getNumSuccessors() > 1)
@@ -308,9 +308,9 @@
     return;
   const Value *FirstDivergentValue = *DivergentValues.begin();
   const Function *F;
-  if (const Argument *Arg = dyn_cast<Argument>(FirstDivergentValue)) {
+  if (const auto *Arg = dyn_cast<Argument>(FirstDivergentValue)) {
     F = Arg->getParent();
-  } else if (const Instruction *I =
+  } else if (const auto *I =
                  dyn_cast<Instruction>(FirstDivergentValue)) {
     F = I->getParent()->getParent();
   } else {
Index: lib/Analysis/GlobalsModRef.cpp
===================================================================
--- lib/Analysis/GlobalsModRef.cpp
+++ lib/Analysis/GlobalsModRef.cpp
@@ -197,7 +197,7 @@
     if (auto *F = dyn_cast<Function>(V))
       GAR->FunctionInfos.erase(F);
 
-    if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+    if (auto *GV = dyn_cast<GlobalValue>(V)) {
       if (GAR->NonAddressTakenGlobals.erase(GV)) {
         // This global might be an indirect global.  If so, remove it and
         // remove any AllocRelatedValues for it.
@@ -335,10 +335,10 @@
   for (Use &U : V->uses()) {
     User *I = U.getUser();
 
-    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+    if (auto *LI = dyn_cast<LoadInst>(I)) {
       if (Readers)
         Readers->insert(LI->getParent()->getParent());
-    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+    } else if (auto *SI = dyn_cast<StoreInst>(I)) {
       if (V == SI->getOperand(1)) {
         if (Writers)
           Writers->insert(SI->getParent()->getParent());
@@ -363,10 +363,10 @@
           return true; // Argument of an unknown call.
         }
       }
-    } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(I)) {
+    } else if (auto *ICI = dyn_cast<ICmpInst>(I)) {
       if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
         return true; // Allow comparison against null.
-    } else if (Constant *C = dyn_cast<Constant>(I)) {
+    } else if (auto *C = dyn_cast<Constant>(I)) {
       // Ignore constants which don't have any live uses.
       if (isa<GlobalValue>(C) || C->isConstantUsed())
         return true;
@@ -398,14 +398,14 @@
   // Walk the user list of the global.  If we find anything other than a direct
   // load or store, bail out.
   for (User *U : GV->users()) {
-    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
+    if (auto *LI = dyn_cast<LoadInst>(U)) {
       // The pointer loaded from the global can only be used in simple ways:
       // we allow addressing of it and loading storing to it.  We do *not* allow
       // storing the loaded pointer somewhere else or passing to a function.
       if (AnalyzeUsesOfPointer(LI))
         return false; // Loaded pointer escapes.
       // TODO: Could try some IP mod/ref of the loaded pointer.
-    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
+    } else if (auto *SI = dyn_cast<StoreInst>(U)) {
       // Storing the global itself.
       if (SI->getOperand(0) == GV)
         return false;
@@ -785,8 +785,8 @@
 
   // If either of the underlying values is a global, they may be non-addr-taken
   // globals, which we can answer queries about.
-  const GlobalValue *GV1 = dyn_cast<GlobalValue>(UV1);
-  const GlobalValue *GV2 = dyn_cast<GlobalValue>(UV2);
+  const auto *GV1 = dyn_cast<GlobalValue>(UV1);
+  const auto *GV2 = dyn_cast<GlobalValue>(UV2);
   if (GV1 || GV2) {
     // If the global's address is taken, pretend we don't know it's a pointer to
     // the global.
@@ -824,12 +824,12 @@
   // so, we may be able to handle this.  First check to see if the base pointer
   // is a direct load from an indirect global.
   GV1 = GV2 = nullptr;
-  if (const LoadInst *LI = dyn_cast<LoadInst>(UV1))
-    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getOperand(0)))
+  if (const auto *LI = dyn_cast<LoadInst>(UV1))
+    if (auto *GV = dyn_cast<GlobalVariable>(LI->getOperand(0)))
      if (IndirectGlobals.count(GV))
        GV1 = GV;
-  if (const LoadInst *LI = dyn_cast<LoadInst>(UV2))
-    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(LI->getOperand(0)))
+  if (const auto *LI = dyn_cast<LoadInst>(UV2))
+    if (const auto *GV = dyn_cast<GlobalVariable>(LI->getOperand(0)))
       if (IndirectGlobals.count(GV))
         GV2 = GV;
@@ -890,7 +890,7 @@
 
   // If we are asking for mod/ref info of a direct call with a pointer to a
   // global we are tracking, return information if we have it.
-  if (const GlobalValue *GV =
+  if (const auto *GV =
           dyn_cast<GlobalValue>(GetUnderlyingObject(Loc.Ptr, DL)))
     if (GV->hasLocalLinkage())
      if (const Function *F = CS.getCalledFunction())
Index: lib/Analysis/IVUsers.cpp
===================================================================
--- lib/Analysis/IVUsers.cpp
+++ lib/Analysis/IVUsers.cpp
@@ -70,7 +70,7 @@
 static bool isInteresting(const SCEV *S, const Instruction *I, const Loop *L,
                           ScalarEvolution *SE, LoopInfo *LI) {
   // An addrec is interesting if it's affine or if it has an interesting start.
-  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+  if (const auto *AR = dyn_cast<SCEVAddRecExpr>(S)) {
     // Keep things simple. Don't touch loop-variant strides unless they're
     // only used outside the loop and we can simplify them.
     if (AR->getLoop() == L)
@@ -85,7 +85,7 @@
   }
 
   // An add is interesting if exactly one of its operands is interesting.
-  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
+  if (const auto *Add = dyn_cast<SCEVAddExpr>(S)) {
     bool AnyInterestingYet = false;
     for (SCEVAddExpr::op_iterator OI = Add->op_begin(), OE = Add->op_end();
          OI != OE; ++OI)
@@ -172,7 +172,7 @@
 
   SmallPtrSet<Instruction *, 16> UniqueUsers;
   for (Use &U : I->uses()) {
-    Instruction *User = cast<Instruction>(U.getUser());
+    auto *User = cast<Instruction>(U.getUser());
     if (!UniqueUsers.insert(User).second)
       continue;
 
@@ -184,7 +184,7 @@
     // headers. Otherwise, SCEVExpander will crash.
     BasicBlock *UseBB = User->getParent();
     // A phi's use is live out of its predecessor block.
-    if (PHINode *PHI = dyn_cast<PHINode>(User)) {
+    if (auto *PHI = dyn_cast<PHINode>(User)) {
       unsigned OperandNo = U.getOperandNo();
       unsigned ValNo = PHINode::getIncomingValueNumForOperand(OperandNo);
       UseBB = PHI->getIncomingBlock(ValNo);
@@ -357,13 +357,13 @@
 }
 
 static const SCEVAddRecExpr *findAddRecForLoop(const SCEV *S, const Loop *L) {
-  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+  if (const auto *AR = dyn_cast<SCEVAddRecExpr>(S)) {
     if (AR->getLoop() == L)
       return AR;
     return findAddRecForLoop(AR->getStart(), L);
   }
 
-  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
+  if (const auto *Add = dyn_cast<SCEVAddExpr>(S)) {
     for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
          I != E; ++I)
       if (const SCEVAddRecExpr *AR = findAddRecForLoop(*I, L))
Index: lib/Analysis/InlineCost.cpp
===================================================================
--- lib/Analysis/InlineCost.cpp
+++ lib/Analysis/InlineCost.cpp
@@ -308,7 +308,7 @@
   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
        GTI != GTE; ++GTI) {
-    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
+    auto *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
     if (!OpC)
       if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
         OpC = dyn_cast<ConstantInt>(SimpleOp);
@@ -427,7 +427,7 @@
 
 bool CallAnalyzer::visitBitCast(BitCastInst &I) {
   // Propagate constants through bitcasts.
-  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
+  auto *COp = dyn_cast<Constant>(I.getOperand(0));
   if (!COp)
     COp = SimplifiedValues.lookup(I.getOperand(0));
   if (COp)
@@ -455,7 +455,7 @@
 
 bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
   // Propagate constants through ptrtoint.
-  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
+  auto *COp = dyn_cast<Constant>(I.getOperand(0));
   if (!COp)
     COp = SimplifiedValues.lookup(I.getOperand(0));
   if (COp)
@@ -492,7 +492,7 @@
 
 bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
   // Propagate constants through ptrtoint.
-  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
+  auto *COp = dyn_cast<Constant>(I.getOperand(0));
   if (!COp)
     COp = SimplifiedValues.lookup(I.getOperand(0));
   if (COp)
@@ -523,7 +523,7 @@
 
 bool CallAnalyzer::visitCastInst(CastInst &I) {
   // Propagate constants through ptrtoint.
-  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
+  auto *COp = dyn_cast<Constant>(I.getOperand(0));
   if (!COp)
     COp = SimplifiedValues.lookup(I.getOperand(0));
   if (COp)
@@ -540,7 +540,7 @@
 
 bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
   Value *Operand = I.getOperand(0);
-  Constant *COp = dyn_cast<Constant>(Operand);
+  auto *COp = dyn_cast<Constant>(Operand);
   if (!COp)
     COp = SimplifiedValues.lookup(Operand);
   if (COp) {
@@ -568,7 +568,7 @@
   // caller.  This will also trip if the callee function has a non-null
   // parameter attribute, but that's a less interesting case because hopefully
   // the callee would already have been simplified based on that.
-  if (Argument *A = dyn_cast<Argument>(V))
+  if (auto *A = dyn_cast<Argument>(V))
     if (paramHasAttr(A, Attribute::NonNull))
       return true;
@@ -601,7 +601,7 @@
   // code. In future, we should elaborate this based on BPI and BFI in more
   // general threshold adjusting heuristics in updateThreshold().
   Instruction *Instr = CS.getInstruction();
-  if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
+  if (auto *II = dyn_cast<InvokeInst>(Instr)) {
     if (isa<UnreachableInst>(II->getNormalDest()->getTerminator()))
       return false;
   } else if (isa<UnreachableInst>(Instr->getParent()->getTerminator()))
     return false;
@@ -674,8 +674,8 @@
   if (!isa<Constant>(RHS))
     if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
       RHS = SimpleRHS;
-  if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
-    if (Constant *CRHS = dyn_cast<Constant>(RHS))
+  if (auto *CLHS = dyn_cast<Constant>(LHS)) {
+    if (auto *CRHS = dyn_cast<Constant>(RHS))
       if (Constant *C =
               ConstantExpr::getCompare(I.getPredicate(), CLHS, CRHS)) {
         SimplifiedValues[&I] = C;
@@ -773,7 +773,7 @@
   else
     SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);
 
-  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
+  if (auto *C = dyn_cast_or_null<Constant>(SimpleV)) {
     SimplifiedValues[&I] = C;
     return true;
   }
@@ -817,7 +817,7 @@
 
 bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
   // Constant folding for extract value is trivial.
-  Constant *C = dyn_cast<Constant>(I.getAggregateOperand());
+  auto *C = dyn_cast<Constant>(I.getAggregateOperand());
   if (!C)
     C = SimplifiedValues.lookup(I.getAggregateOperand());
   if (C) {
@@ -831,10 +831,10 @@
 
 bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
   // Constant folding for insert value is trivial.
-  Constant *AggC = dyn_cast<Constant>(I.getAggregateOperand());
+  auto *AggC = dyn_cast<Constant>(I.getAggregateOperand());
   if (!AggC)
     AggC = SimplifiedValues.lookup(I.getAggregateOperand());
-  Constant *InsertedC = dyn_cast<Constant>(I.getInsertedValueOperand());
+  auto *InsertedC = dyn_cast<Constant>(I.getInsertedValueOperand());
   if (!InsertedC)
     InsertedC = SimplifiedValues.lookup(I.getInsertedValueOperand());
   if (AggC && InsertedC) {
@@ -866,7 +866,7 @@
   ConstantArgs.reserve(CS.arg_size());
   for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); I != E;
        ++I) {
-    Constant *C = dyn_cast<Constant>(*I);
+    auto *C = dyn_cast<Constant>(*I);
     if (!C)
       C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(*I));
     if (!C)
@@ -899,7 +899,7 @@
 
   // Next check if it is an intrinsic we know about.
   // FIXME: Lift this into part of the InstVisitor.
-  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
+  if (auto *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
     switch (II->getIntrinsicID()) {
     default:
       return Base::visitCallSite(CS);
@@ -951,7 +951,7 @@
 
   // Next, check if this happens to be an indirect function call to a known
   // function in this inline context. If not, we've done all we can.
-  Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
+  auto *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
   if (!F)
     return Base::visitCallSite(CS);
@@ -1167,13 +1167,13 @@
   SmallPtrSet<Value *, 4> Visited;
   Visited.insert(V);
   do {
-    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
       if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
         return nullptr;
       V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
-    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+    } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
       if (GA->isInterposable())
         break;
       V = GA->getAliasee();
@@ -1234,7 +1234,7 @@
     if (CS.isByValArgument(I)) {
       // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
-      PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
+      auto *PTy = cast<PointerType>(CS.getArgument(I)->getType());
       unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
       unsigned PointerSize = DL.getPointerSizeInBits();
       // Ceiling division.
@@ -1296,7 +1296,7 @@
   for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
        FAI != FAE; ++FAI, ++CAI) {
     assert(CAI != CS.arg_end());
-    if (Constant *C = dyn_cast<Constant>(CAI))
+    if (auto *C = dyn_cast<Constant>(CAI))
       SimplifiedValues[&*FAI] = C;
 
     Value *PtrArg = *CAI;
@@ -1361,18 +1361,18 @@
 
     // Add in the live successors by first checking whether we have terminator
     // that may be simplified based on the values simplified by this call.
-    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
+    if (auto *BI = dyn_cast<BranchInst>(TI)) {
       if (BI->isConditional()) {
         Value *Cond = BI->getCondition();
-        if (ConstantInt *SimpleCond =
+        if (auto *SimpleCond =
                 dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
           BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
           continue;
         }
       }
-    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
+    } else if (auto *SI = dyn_cast<SwitchInst>(TI)) {
       Value *Cond = SI->getCondition();
-      if (ConstantInt *SimpleCond =
+      if (auto *SimpleCond =
              dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
         BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
         continue;
Index: lib/Analysis/InstructionSimplify.cpp
===================================================================
--- lib/Analysis/InstructionSimplify.cpp
+++ lib/Analysis/InstructionSimplify.cpp
@@ -93,7 +93,7 @@
 /// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
 static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
                           Value *RHS) {
-  CmpInst *Cmp = dyn_cast<CmpInst>(V);
+  auto *Cmp = dyn_cast<CmpInst>(V);
   if (!Cmp)
     return false;
   CmpInst::Predicate CPred = Cmp->getPredicate();
@@ -106,7 +106,7 @@
 
 /// Does the given value dominate the specified phi node?
 static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
-  Instruction *I = dyn_cast<Instruction>(V);
+  auto *I = dyn_cast<Instruction>(V);
   if (!I)
     // Arguments and constants dominate all instructions.
     return true;
@@ -149,7 +149,7 @@
     return nullptr;
 
   // Check whether the expression has the form "(A op' B) op C".
-  if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
+  if (auto *Op0 = dyn_cast<BinaryOperator>(LHS))
     if (Op0->getOpcode() == OpcodeToExpand) {
       // It does!  Try turning it into "(A op C) op' (B op C)".
       Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
@@ -172,7 +172,7 @@
   }
 
   // Check whether the expression has the form "A op (B op' C)".
-  if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
+  if (auto *Op1 = dyn_cast<BinaryOperator>(RHS))
     if (Op1->getOpcode() == OpcodeToExpand) {
       // It does!  Try turning it into "(A op B) op' (A op C)".
       Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
@@ -208,8 +208,8 @@
   if (!MaxRecurse--)
     return nullptr;
 
-  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
-  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
+  auto *Op0 = dyn_cast<BinaryOperator>(LHS);
+  auto *Op1 = dyn_cast<BinaryOperator>(RHS);
 
   // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
   if (Op0 && Op0->getOpcode() == Opcode) {
@@ -345,7 +345,7 @@
   if ((FV && !TV) || (TV && !FV)) {
     // Check that the simplified value has the form "X op Y" where "op" is the
     // same as the original operation.
-    Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
+    auto *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
     if (Simplified && Simplified->getOpcode() == Opcode) {
       // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
       // We already know that "op" is the same as for the simplified value.  See
@@ -382,7 +382,7 @@
     Pred = CmpInst::getSwappedPredicate(Pred);
   }
   assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
-  SelectInst *SI = cast<SelectInst>(LHS);
+  auto *SI = cast<SelectInst>(LHS);
   Value *Cond = SI->getCondition();
   Value *TV = SI->getTrueValue();
   Value *FV = SI->getFalseValue();
@@ -505,7 +505,7 @@
     Pred = CmpInst::getSwappedPredicate(Pred);
   }
   assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
-  PHINode *PI = cast<PHINode>(LHS);
+  auto *PI = cast<PHINode>(LHS);
 
   // Bail out if RHS and the phi may be mutually interdependent due to a loop.
   if (!ValueDominatesPHI(RHS, PI, Q.DT))
@@ -531,8 +531,8 @@
 /// If not, this returns null.
 static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                               const Query &Q, unsigned MaxRecurse) {
-  if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
-    if (Constant *CRHS = dyn_cast<Constant>(Op1))
+  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
+    if (auto *CRHS = dyn_cast<Constant>(Op1))
       return ConstantFoldBinaryOpOperands(Instruction::Add, CLHS, CRHS, Q.DL);
 
     // Canonicalize the constant to the RHS.
@@ -612,14 +612,14 @@
   SmallPtrSet<Value *, 4> Visited;
   Visited.insert(V);
   do {
-    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
       if ((!AllowNonInbounds && !GEP->isInBounds()) ||
           !GEP->accumulateConstantOffset(DL, Offset))
         break;
       V = GEP->getPointerOperand();
     } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
-    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+    } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
       if (GA->isInterposable())
         break;
       V = GA->getAliasee();
@@ -665,8 +665,8 @@
 /// If not, this returns null.
 static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                               const Query &Q, unsigned MaxRecurse) {
-  if (Constant *CLHS = dyn_cast<Constant>(Op0))
-    if (Constant *CRHS = dyn_cast<Constant>(Op1))
+  if (auto *CLHS = dyn_cast<Constant>(Op0))
+    if (auto *CRHS = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Instruction::Sub, CLHS, CRHS, Q.DL);
 
   // X - undef -> undef
@@ -807,8 +807,8 @@
 /// returns null.
 static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                                const Query &Q, unsigned MaxRecurse) {
-  if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
-    if (Constant *CRHS = dyn_cast<Constant>(Op1))
+  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
+    if (auto *CRHS = dyn_cast<Constant>(Op1))
       return ConstantFoldBinaryOpOperands(Instruction::FAdd, CLHS, CRHS, Q.DL);
 
     // Canonicalize the constant to the RHS.
@@ -833,7 +833,7 @@
   else if (match(Op0, m_FSub(m_AnyZero(), m_Specific(Op1))))
     SubOp = Op0;
   if (SubOp) {
-    Instruction *FSub = cast<Instruction>(SubOp);
+    auto *FSub = cast<Instruction>(SubOp);
     if ((FMF.noNaNs() || FSub->hasNoNaNs()) &&
         (FMF.noInfs() || FSub->hasNoInfs()))
       return Constant::getNullValue(Op0->getType());
@@ -846,8 +846,8 @@
 /// returns null.
 static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                                const Query &Q, unsigned MaxRecurse) {
-  if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
-    if (Constant *CRHS = dyn_cast<Constant>(Op1))
+  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
+    if (auto *CRHS = dyn_cast<Constant>(Op1))
       return ConstantFoldBinaryOpOperands(Instruction::FSub, CLHS, CRHS, Q.DL);
   }
@@ -882,8 +882,8 @@
                                FastMathFlags FMF,
                                const Query &Q,
                                unsigned MaxRecurse) {
-  if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
-    if (Constant *CRHS = dyn_cast<Constant>(Op1))
+  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
+    if (auto *CRHS = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Instruction::FMul, CLHS, CRHS, Q.DL);
 
     // Canonicalize the constant to the RHS.
@@ -905,8 +905,8 @@
 /// If not, this returns null.
 static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q,
                               unsigned MaxRecurse) {
-  if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
-    if (Constant *CRHS = dyn_cast<Constant>(Op1))
+  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
+    if (auto *CRHS = dyn_cast<Constant>(Op1))
       return ConstantFoldBinaryOpOperands(Instruction::Mul, CLHS, CRHS, Q.DL);

     // Canonicalize the constant to the RHS.
@@ -1002,8 +1002,8 @@
 /// If not, this returns null.
 static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                           const Query &Q, unsigned MaxRecurse) {
-  if (Constant *C0 = dyn_cast<Constant>(Op0))
-    if (Constant *C1 = dyn_cast<Constant>(Op1))
+  if (auto *C0 = dyn_cast<Constant>(Op0))
+    if (auto *C1 = dyn_cast<Constant>(Op1))
       return ConstantFoldBinaryOpOperands(Opcode, C0, C1, Q.DL);

   bool isSigned = Opcode == Instruction::SDiv;
@@ -1040,13 +1040,13 @@
   Value *X = nullptr, *Y = nullptr;
   if (match(Op0, m_Mul(m_Value(X), m_Value(Y))) && (X == Op1 || Y == Op1)) {
     if (Y != Op1) std::swap(X, Y); // Ensure expression is (X * Y) / Y, Y = Op1
-    OverflowingBinaryOperator *Mul = cast<OverflowingBinaryOperator>(Op0);
+    auto *Mul = cast<OverflowingBinaryOperator>(Op0);
     // If the Mul knows it does not overflow, then we are good to go.
     if ((isSigned && Mul->hasNoSignedWrap()) ||
         (!isSigned && Mul->hasNoUnsignedWrap()))
       return X;
     // If X has the form X = A / Y then X * Y cannot overflow.
-    if (BinaryOperator *Div = dyn_cast<BinaryOperator>(X))
+    if (auto *Div = dyn_cast<BinaryOperator>(X))
       if (Div->getOpcode() == Opcode && Div->getOperand(1) == Y)
         return X;
   }
@@ -1168,8 +1168,8 @@
 /// If not, this returns null.
 static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                           const Query &Q, unsigned MaxRecurse) {
-  if (Constant *C0 = dyn_cast<Constant>(Op0))
-    if (Constant *C1 = dyn_cast<Constant>(Op1))
+  if (auto *C0 = dyn_cast<Constant>(Op0))
+    if (auto *C1 = dyn_cast<Constant>(Op1))
       return ConstantFoldBinaryOpOperands(Opcode, C0, C1, Q.DL);

   // X % undef -> undef
@@ -1288,7 +1288,7 @@
 /// Returns true if a shift by \c Amount always yields undef.
 static bool isUndefShift(Value *Amount) {
-  Constant *C = dyn_cast<Constant>(Amount);
+  auto *C = dyn_cast<Constant>(Amount);
   if (!C)
     return false;

@@ -1297,7 +1297,7 @@
     return true;

   // Shifting by the bitwidth or more is undefined.
-  if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
+  if (auto *CI = dyn_cast<ConstantInt>(C))
     if (CI->getValue().getLimitedValue() >=
         CI->getType()->getScalarSizeInBits())
       return true;
@@ -1317,8 +1317,8 @@
 /// If not, this returns null.
 static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1,
                             const Query &Q, unsigned MaxRecurse) {
-  if (Constant *C0 = dyn_cast<Constant>(Op0))
-    if (Constant *C1 = dyn_cast<Constant>(Op1))
+  if (auto *C0 = dyn_cast<Constant>(Op0))
+    if (auto *C1 = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Opcode, C0, C1, Q.DL);

   // 0 shift by X -> 0
@@ -1616,8 +1616,8 @@
 /// If not, this returns null.
 static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
                               unsigned MaxRecurse) {
-  if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
-    if (Constant *CRHS = dyn_cast<Constant>(Op1))
+  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
+    if (auto *CRHS = dyn_cast<Constant>(Op1))
       return ConstantFoldBinaryOpOperands(Instruction::And, CLHS, CRHS, Q.DL);

     // Canonicalize the constant to the RHS.
@@ -1818,8 +1818,8 @@
 /// If not, this returns null.
 static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
                              unsigned MaxRecurse) {
-  if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
-    if (Constant *CRHS = dyn_cast<Constant>(Op1))
+  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
+    if (auto *CRHS = dyn_cast<Constant>(Op1))
       return ConstantFoldBinaryOpOperands(Instruction::Or, CLHS, CRHS, Q.DL);

     // Canonicalize the constant to the RHS.
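[Editor's aside, not part of the patch.] Nearly all of the SimplifyXInst hunks above touch the same opening guard: both operands are probed with dyn_cast<Constant> and, only if both probes succeed, the operation is folded outright. A hedged sketch of that shape; foldBothConstant is a made-up name, while the IR types and ConstantExpr::get are real LLVM API:

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/InstrTypes.h"

    using namespace llvm;

    // Illustrative only: fold "Op0 <op> Op1" when both operands are constants.
    static Constant *foldBothConstant(Instruction::BinaryOps Opcode,
                                      Value *Op0, Value *Op1) {
      if (auto *C0 = dyn_cast<Constant>(Op0))   // null unless Op0 is a Constant
        if (auto *C1 = dyn_cast<Constant>(Op1)) // likewise for Op1
          return ConstantExpr::get(Opcode, C0, C1);
      return nullptr; // not foldable; the caller tries other simplifications
    }

The auto * spelling leaves this guard exactly as readable as before, since Constant still appears in each template argument.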
@@ -1898,8 +1898,8 @@
   Value *C = nullptr, *D = nullptr;
   if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
       match(Op1, m_And(m_Value(B), m_Value(D)))) {
-    ConstantInt *C1 = dyn_cast<ConstantInt>(C);
-    ConstantInt *C2 = dyn_cast<ConstantInt>(D);
+    auto *C1 = dyn_cast<ConstantInt>(C);
+    auto *C2 = dyn_cast<ConstantInt>(D);
     if (C1 && C2 && (C1->getValue() == ~C2->getValue())) {
       // (A & C1)|(B & C2)
       // If we have: ((V + N) & C1) | (V & C2)
@@ -1951,8 +1951,8 @@
 /// If not, this returns null.
 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q,
                               unsigned MaxRecurse) {
-  if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
-    if (Constant *CRHS = dyn_cast<Constant>(Op1))
+  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
+    if (auto *CRHS = dyn_cast<Constant>(Op1))
       return ConstantFoldBinaryOpOperands(Instruction::Xor, CLHS, CRHS, Q.DL);

     // Canonicalize the constant to the RHS.
@@ -2010,10 +2010,10 @@
 /// Helper function for analyzing max/min idioms.
 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
                                          Value *LHS, Value *RHS) {
-  SelectInst *SI = dyn_cast<SelectInst>(V);
+  auto *SI = dyn_cast<SelectInst>(V);
   if (!SI)
     return nullptr;
-  CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
+  auto *Cmp = dyn_cast<CmpInst>(SI->getCondition());
   if (!Cmp)
     return nullptr;
   Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
@@ -2136,8 +2136,8 @@
   // address, due to canonicalization and constant folding.
   if (isa<AllocaInst>(LHS) &&
       (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
-    ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
-    ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
+    auto *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
+    auto *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
     uint64_t LHSSize, RHSSize;
     if (LHSOffsetCI && RHSOffsetCI &&
         getObjectSize(LHS, LHSSize, DL, TLI) &&
@@ -2196,13 +2196,13 @@
   // library (and, thus, could be malloc'ed by the implementation).
   auto IsAllocDisjoint = [](ArrayRef<Value *> Objects) {
     return all_of(Objects, [](Value *V) {
-      if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
+      if (const auto *AI = dyn_cast<AllocaInst>(V))
         return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
-      if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
+      if (const auto *GV = dyn_cast<GlobalValue>(V))
         return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
                 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
                !GV->isThreadLocal();
-      if (const Argument *A = dyn_cast<Argument>(V))
+      if (const auto *A = dyn_cast<Argument>(V))
        return A->hasByValAttr();
       return false;
     });
@@ -2501,8 +2501,8 @@
                                     unsigned MaxRecurse) {
   Type *ITy = GetCompareTy(LHS); // The return type.

-  BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
-  BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
+  auto *LBO = dyn_cast<BinaryOperator>(LHS);
+  auto *RBO = dyn_cast<BinaryOperator>(RHS);
   if (MaxRecurse && (LBO || RBO)) {
     // Analyze the case when either LHS or RHS is an add instruction.
     Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
@@ -2630,7 +2630,7 @@
   // 0 - (zext X) pred C
   if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
-    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
+    if (auto *RHSC = dyn_cast<ConstantInt>(RHS)) {
       if (RHSC->getValue().isStrictlyPositive()) {
         if (Pred == ICmpInst::ICMP_SLT)
           return ConstantInt::getTrue(RHSC->getContext());
@@ -3013,8 +3013,8 @@
   CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
   assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");

-  if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
-    if (Constant *CRHS = dyn_cast<Constant>(RHS))
+  if (auto *CLHS = dyn_cast<Constant>(LHS)) {
+    if (auto *CRHS = dyn_cast<Constant>(RHS))
       return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);

     // If we have a constant, make sure it is on the RHS.
@@ -3074,13 +3074,13 @@
   // if the integer type is the same size as the pointer type.
   if (MaxRecurse && isa<PtrToIntInst>(LI) &&
       Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
-    if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
+    if (auto *RHSC = dyn_cast<Constant>(RHS)) {
       // Transfer the cast to the constant.
       if (Value *V = SimplifyICmpInst(Pred, SrcOp,
                                       ConstantExpr::getIntToPtr(RHSC, SrcTy),
                                       Q, MaxRecurse-1))
         return V;
-    } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
+    } else if (auto *RI = dyn_cast<PtrToIntInst>(RHS)) {
       if (RI->getOperand(0)->getType() == SrcTy)
         // Compare without the cast.
         if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
@@ -3092,7 +3092,7 @@
   if (isa<ZExtInst>(LHS)) {
     // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
     // same type.
-    if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
+    if (auto *RI = dyn_cast<ZExtInst>(RHS)) {
       if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
         // Compare X and Y.  Note that signed predicates become unsigned.
         if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
@@ -3102,7 +3102,7 @@
     }
     // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
     // too.  If not, then try to deduce the result of the comparison.
-    else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+    else if (auto *CI = dyn_cast<ConstantInt>(RHS)) {
       // Compute the constant that would happen if we truncated to SrcTy then
       // reextended to DstTy.
       Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
@@ -3152,7 +3152,7 @@
   if (isa<SExtInst>(LHS)) {
     // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
     // same type.
-    if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
+    if (auto *RI = dyn_cast<SExtInst>(RHS)) {
       if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
         // Compare X and Y.  Note that the predicate does not change.
         if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
@@ -3161,7 +3161,7 @@
     }
     // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
     // too.  If not, then try to deduce the result of the comparison.
-    else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+    else if (auto *CI = dyn_cast<ConstantInt>(RHS)) {
       // Compute the constant that would happen if we truncated to SrcTy then
       // reextended to DstTy.
       Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
@@ -3252,8 +3252,8 @@
                                  CRHS->getPointerOperand()))
       return C;

-  if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
-    if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) {
+  if (auto *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
+    if (auto *GRHS = dyn_cast<GEPOperator>(RHS)) {
       if (GLHS->getPointerOperand() == GRHS->getPointerOperand() &&
           GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() &&
           (ICmpInst::isEquality(Pred) ||
@@ -3323,8 +3323,8 @@
   CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
   assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");

-  if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
-    if (Constant *CRHS = dyn_cast<Constant>(RHS))
+  if (auto *CLHS = dyn_cast<Constant>(LHS)) {
+    if (auto *CRHS = dyn_cast<Constant>(RHS))
       return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);

     // If we have a constant, make sure it is on the RHS.
@@ -3486,7 +3486,7 @@
   }

   // Same for CmpInsts.
-  if (CmpInst *C = dyn_cast<CmpInst>(I)) {
+  if (auto *C = dyn_cast<CmpInst>(I)) {
     if (MaxRecurse) {
       if (C->getOperand(0) == Op)
         return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), Q,
@@ -3501,13 +3501,13 @@
   // If all operands are constant after substituting Op for RepOp then we can
   // constant fold the instruction.
-  if (Constant *CRepOp = dyn_cast<Constant>(RepOp)) {
+  if (auto *CRepOp = dyn_cast<Constant>(RepOp)) {
     // Build a list of all constant operands.
     SmallVector<Constant *, 8> ConstOps;
     for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
       if (I->getOperand(i) == Op)
         ConstOps.push_back(CRepOp);
-      else if (Constant *COp = dyn_cast<Constant>(I->getOperand(i)))
+      else if (auto *COp = dyn_cast<Constant>(I->getOperand(i)))
         ConstOps.push_back(COp);
       else
         break;
@@ -3515,11 +3515,11 @@
     // All operands were constants, fold it.
     if (ConstOps.size() == I->getNumOperands()) {
-      if (CmpInst *C = dyn_cast<CmpInst>(I))
+      if (auto *C = dyn_cast<CmpInst>(I))
         return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
                                                ConstOps[1], Q.DL, Q.TLI);

-      if (LoadInst *LI = dyn_cast<LoadInst>(I))
+      if (auto *LI = dyn_cast<LoadInst>(I))
         if (!LI->isVolatile())
           return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL);

@@ -3681,7 +3681,7 @@
                                  unsigned MaxRecurse) {
   // select true, X, Y  -> X
   // select false, X, Y -> Y
-  if (Constant *CB = dyn_cast<Constant>(CondVal)) {
+  if (auto *CB = dyn_cast<Constant>(CondVal)) {
     if (CB->isAllOnesValue())
       return TrueVal;
     if (CB->isNullValue())
@@ -3733,7 +3733,7 @@
   // Compute the (pointer) type returned by the GEP instruction.
   Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1));
   Type *GEPTy = PointerType::get(LastType, AS);
-  if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType()))
+  if (auto *VT = dyn_cast<VectorType>(Ops[0]->getType()))
     GEPTy = VectorType::get(GEPTy, VT->getNumElements());

   if (isa<UndefValue>(Ops[0]))
@@ -3842,8 +3842,8 @@
 static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
                                       ArrayRef<unsigned> Idxs, const Query &Q,
                                       unsigned) {
-  if (Constant *CAgg = dyn_cast<Constant>(Agg))
-    if (Constant *CVal = dyn_cast<Constant>(Val))
+  if (auto *CAgg = dyn_cast<Constant>(Agg))
+    if (auto *CVal = dyn_cast<Constant>(Val))
       return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);

   // insertvalue x, undef, n -> x
@@ -3851,7 +3851,7 @@
     return Agg;

   // insertvalue x, (extractvalue y, n), n
-  if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
+  if (auto *EV = dyn_cast<ExtractValueInst>(Val))
     if (EV->getAggregateOperand()->getType() == Agg->getType() &&
         EV->getIndices() == Idxs) {
       // insertvalue undef, (extractvalue y, n), n -> y
@@ -4058,8 +4058,8 @@
   case Instruction::Or:  return SimplifyOrInst (LHS, RHS, Q, MaxRecurse);
   case Instruction::Xor: return SimplifyXorInst(LHS, RHS, Q, MaxRecurse);
   default:
-    if (Constant *CLHS = dyn_cast<Constant>(LHS))
-      if (Constant *CRHS = dyn_cast<Constant>(RHS))
+    if (auto *CLHS = dyn_cast<Constant>(LHS))
+      if (auto *CRHS = dyn_cast<Constant>(RHS))
        return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);

     // If the operation is associative, try some generic simplifications.
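[Editor's aside, not part of the patch.] These hunks mix the two cast flavors, and the distinction is worth keeping in mind while reviewing auto conversions: cast<T> is an assertion that the operand already is a T, while dyn_cast<T> is a query that may answer null. A small illustrative sketch using real IR types:

    #include "llvm/IR/Instructions.h"
    #include "llvm/Support/Casting.h"

    using namespace llvm;

    void inspect(Value *V) {
      // dyn_cast<T> is a query: it returns null when V is not a T.
      if (auto *SI = dyn_cast<SelectInst>(V))
        (void)SI->getCondition();

      // cast<T> is an assertion: use it only once the type is already known,
      // e.g. behind an isa<T> check, as several hunks above do.
      if (isa<LoadInst>(V)) {
        auto *LI = cast<LoadInst>(V); // would assert if V were not a load
        (void)LI->getPointerOperand();
      }
    }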
@@ -4287,7 +4287,7 @@

   // Unary Ops
   if (NumOperands == 1)
-    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*ArgBegin))
+    if (auto *II = dyn_cast<IntrinsicInst>(*ArgBegin))
       if (II->getIntrinsicID() == IID)
         return II;

@@ -4298,16 +4298,16 @@
 static Value *SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd,
                            const Query &Q, unsigned MaxRecurse) {
   Type *Ty = V->getType();
-  if (PointerType *PTy = dyn_cast<PointerType>(Ty))
+  if (auto *PTy = dyn_cast<PointerType>(Ty))
     Ty = PTy->getElementType();
-  FunctionType *FTy = cast<FunctionType>(Ty);
+  auto *FTy = cast<FunctionType>(Ty);

   // call undef -> undef
   // call null -> undef
   if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V))
     return UndefValue::get(FTy->getReturnType());

-  Function *F = dyn_cast<Function>(V);
+  auto *F = dyn_cast<Function>(V);
   if (!F)
     return nullptr;

@@ -4321,7 +4321,7 @@
   SmallVector<Constant *, 4> ConstantArgs;
   ConstantArgs.reserve(ArgEnd - ArgBegin);
   for (IterTy I = ArgBegin, E = ArgEnd; I != E; ++I) {
-    Constant *C = dyn_cast<Constant>(*I);
+    auto *C = dyn_cast<Constant>(*I);
     if (!C)
       return nullptr;
     ConstantArgs.push_back(C);
@@ -4458,7 +4458,7 @@
     break;
   }
   case Instruction::InsertValue: {
-    InsertValueInst *IV = cast<InsertValueInst>(I);
+    auto *IV = cast<InsertValueInst>(I);
     Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
                                      IV->getInsertedValueOperand(),
                                      IV->getIndices(), DL, TLI, DT, AC, I);
Index: lib/Analysis/LazyCallGraph.cpp
===================================================================
--- lib/Analysis/LazyCallGraph.cpp
+++ lib/Analysis/LazyCallGraph.cpp
@@ -69,7 +69,7 @@
     }

     for (Value *Op : I.operand_values())
-      if (Constant *C = dyn_cast<Constant>(Op))
+      if (auto *C = dyn_cast<Constant>(Op))
         if (Visited.insert(C).second)
           Worklist.push_back(C);
   }
Index: lib/Analysis/LazyValueInfo.cpp
===================================================================
--- lib/Analysis/LazyValueInfo.cpp
+++ lib/Analysis/LazyValueInfo.cpp
@@ -151,7 +151,7 @@

   void markConstant(Constant *V) {
     assert(V && "Marking constant with NULL");
-    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+    if (auto *CI = dyn_cast<ConstantInt>(V)) {
       markConstantRange(ConstantRange(CI->getValue()));
       return;
     }
@@ -167,7 +167,7 @@
   void markNotConstant(Constant *V) {
     assert(V && "Marking constant with NULL");
-    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+    if (auto *CI = dyn_cast<ConstantInt>(V)) {
       markConstantRange(ConstantRange(CI->getValue()+1, CI->getValue()));
       return;
     }
@@ -677,7 +677,7 @@

 LVILatticeVal LazyValueInfoImpl::getBlockValue(Value *Val, BasicBlock *BB) {
   // If already a constant, there is nothing to compute.
-  if (Constant *VC = dyn_cast<Constant>(Val))
+  if (auto *VC = dyn_cast<Constant>(Val))
     return LVILatticeVal::get(VC);

   return TheCache.getCachedValueInfo(Val, BB);
@@ -728,11 +728,11 @@

 bool LazyValueInfoImpl::solveBlockValueImpl(LVILatticeVal &Res,
                                             Value *Val, BasicBlock *BB) {
-  Instruction *BBI = dyn_cast<Instruction>(Val);
+  auto *BBI = dyn_cast<Instruction>(Val);
   if (!BBI || BBI->getParent() != BB)
     return solveBlockValueNonLocal(Res, Val, BB);

-  if (PHINode *PN = dyn_cast<PHINode>(BBI))
+  if (auto *PN = dyn_cast<PHINode>(BBI))
     return solveBlockValuePHINode(Res, PN, BB);

   if (auto *SI = dyn_cast<SelectInst>(BBI))
@@ -747,7 +747,7 @@
   // This does mean that we have a sensativity to where the defining
   // instruction is placed, even if it could legally be hoisted much higher.
   // That is unfortunate.
-  PointerType *PT = dyn_cast<PointerType>(BBI->getType());
+  auto *PT = dyn_cast<PointerType>(BBI->getType());
   if (PT && isKnownNonNull(BBI)) {
     Res = LVILatticeVal::getNot(ConstantPointerNull::get(PT));
     return true;
@@ -756,7 +756,7 @@
   if (isa<CastInst>(BBI))
     return solveBlockValueCast(Res, BBI, BB);

-  BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI);
+  auto *BO = dyn_cast<BinaryOperator>(BBI);
   if (BO && isa<ConstantInt>(BO->getOperand(1)))
     return solveBlockValueBinaryOp(Res, BBI, BB);
   }
@@ -768,28 +768,28 @@
 }

 static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
-  if (LoadInst *L = dyn_cast<LoadInst>(I)) {
+  if (auto *L = dyn_cast<LoadInst>(I)) {
     return L->getPointerAddressSpace() == 0 &&
         GetUnderlyingObject(L->getPointerOperand(),
                             L->getModule()->getDataLayout()) == Ptr;
   }
-  if (StoreInst *S = dyn_cast<StoreInst>(I)) {
+  if (auto *S = dyn_cast<StoreInst>(I)) {
     return S->getPointerAddressSpace() == 0 &&
         GetUnderlyingObject(S->getPointerOperand(),
                             S->getModule()->getDataLayout()) == Ptr;
   }
-  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
+  if (auto *MI = dyn_cast<MemIntrinsic>(I)) {
     if (MI->isVolatile()) return false;

     // FIXME: check whether it has a valuerange that excludes zero?
-    ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
+    auto *Len = dyn_cast<ConstantInt>(MI->getLength());
     if (!Len || Len->isZero()) return false;

     if (MI->getDestAddressSpace() == 0)
       if (GetUnderlyingObject(MI->getRawDest(),
                               MI->getModule()->getDataLayout()) == Ptr)
         return true;
-    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
+    if (auto *MTI = dyn_cast<MemTransferInst>(MI))
       if (MTI->getSourceAddressSpace() == 0)
         if (GetUnderlyingObject(MTI->getRawSource(),
                                 MTI->getModule()->getDataLayout()) == Ptr)
@@ -828,7 +828,7 @@
   // this particular block.
   if (Val->getType()->isPointerTy() &&
       (isKnownNonNull(Val) || isObjectDereferencedInBlock(Val, BB))) {
-    PointerType *PTy = cast<PointerType>(Val->getType());
+    auto *PTy = cast<PointerType>(Val->getType());
     Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
   } else {
     Result = LVILatticeVal::getOverdefined();
@@ -857,7 +857,7 @@
   // this particular block.
   if (Val->getType()->isPointerTy() &&
       isObjectDereferencedInBlock(Val, BB)) {
-    PointerType *PTy = cast<PointerType>(Val->getType());
+    auto *PTy = cast<PointerType>(Val->getType());
     Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
   }

@@ -1031,7 +1031,7 @@
   if (auto *ICI = dyn_cast<ICmpInst>(Cond)) {
     ICmpInst::Predicate Pred = ICI->getPredicate();
     Value *A = ICI->getOperand(0);
-    if (ConstantInt *CIBase = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
+    if (auto *CIBase = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
       auto addConstants = [](ConstantInt *A, ConstantInt *B) {
         assert(A->getType() == B->getType());
         return ConstantInt::get(A->getType(), A->getValue() + B->getValue());
@@ -1174,7 +1174,7 @@
     LHSRange = LHSVal.getConstantRange();
   }

-  ConstantInt *RHS = cast<ConstantInt>(BBI->getOperand(1));
+  auto *RHS = cast<ConstantInt>(BBI->getOperand(1));
   ConstantRange RHSRange = ConstantRange(RHS->getValue());

   // NOTE: We're currently limited by the set of operations that ConstantRange
@@ -1227,9 +1227,9 @@
   // Calculate the range of values that are allowed by the comparison
   ConstantRange RHSRange(RHS->getType()->getIntegerBitWidth(),
                          /*isFullSet=*/true);
-  if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS))
+  if (auto *CI = dyn_cast<ConstantInt>(RHS))
     RHSRange = ConstantRange(CI->getValue());
-  else if (Instruction *I = dyn_cast<Instruction>(RHS))
+  else if (auto *I = dyn_cast<Instruction>(RHS))
     if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
       RHSRange = getConstantRangeFromMetadata(*Ranges);

@@ -1255,7 +1255,7 @@
 static LVILatticeVal
 getValueFromConditionImpl(Value *Val, Value *Cond, bool isTrueDest,
                           DenseMap<Value*, LVILatticeVal> &Visited) {
-  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
+  if (auto *ICI = dyn_cast<ICmpInst>(Cond))
     return getValueFromICmpCondition(Val, ICI, isTrueDest);

   // Handle conditions in the form of (cond1 && cond2), we know that on the
@@ -1263,7 +1263,7 @@
   if (!isTrueDest)
     return LVILatticeVal::getOverdefined();

-  BinaryOperator *BO = dyn_cast<BinaryOperator>(Cond);
+  auto *BO = dyn_cast<BinaryOperator>(Cond);
   if (!BO || BO->getOpcode() != BinaryOperator::And)
     return LVILatticeVal::getOverdefined();

@@ -1297,7 +1297,7 @@
                       BasicBlock *BBTo, LVILatticeVal &Result) {
   // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
   // know that v != 0.
-  if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
+  if (auto *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
     // If this is a conditional branch and only one successor goes to BBTo, then
     // we may be able to infer something from the condition.
     if (BI->isConditional() &&
@@ -1324,7 +1324,7 @@

   // If the edge was formed by a switch on the value, then we may know exactly
   // what it is.
-  if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
+  if (auto *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
     if (SI->getCondition() != Val)
       return false;

@@ -1354,7 +1354,7 @@
                                   BasicBlock *BBTo, LVILatticeVal &Result,
                                   Instruction *CxtI) {
   // If already a constant, there is nothing to compute.
-  if (Constant *VC = dyn_cast<Constant>(Val)) {
+  if (auto *VC = dyn_cast<Constant>(Val)) {
     Result = LVILatticeVal::get(VC);
     return true;
   }
@@ -1593,13 +1593,13 @@
   if (Result.isConstant()) {
     Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, DL,
                                           TLI);
-    if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
+    if (auto *ResCI = dyn_cast<ConstantInt>(Res))
       return ResCI->isZero() ?
 LazyValueInfo::False : LazyValueInfo::True;
     return LazyValueInfo::Unknown;
   }

   if (Result.isConstantRange()) {
-    ConstantInt *CI = dyn_cast<ConstantInt>(C);
+    auto *CI = dyn_cast<ConstantInt>(C);
     if (!CI) return LazyValueInfo::Unknown;

     ConstantRange CR = Result.getConstantRange();
Index: lib/Analysis/Lint.cpp
===================================================================
--- lib/Analysis/Lint.cpp
+++ lib/Analysis/Lint.cpp
@@ -228,7 +228,7 @@
   visitMemoryReference(I, Callee, MemoryLocation::UnknownSize, 0, nullptr,
                        MemRef::Callee);

-  if (Function *F = dyn_cast<Function>(findValue(Callee,
+  if (auto *F = dyn_cast<Function>(findValue(Callee,
                                                  /*OffsetOk=*/false))) {
     Assert(CS.getCallingConv() == F->getCallingConv(),
            "Undefined behavior: Caller and callee calling convention differ",
@@ -295,14 +295,14 @@
   }

-  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I))
+  if (auto *II = dyn_cast<IntrinsicInst>(&I))
     switch (II->getIntrinsicID()) {
     default: break;

     // TODO: Check more intrinsics

     case Intrinsic::memcpy: {
-      MemCpyInst *MCI = cast<MemCpyInst>(&I);
+      auto *MCI = cast<MemCpyInst>(&I);
       // TODO: If the size is known, use it.
       visitMemoryReference(I, MCI->getDest(), MemoryLocation::UnknownSize,
                            MCI->getAlignment(), nullptr, MemRef::Write);
@@ -313,7 +313,7 @@
       // isn't expressive enough for what we really want to do. Known partial
       // overlap is not distinguished from the case where nothing is known.
       uint64_t Size = 0;
-      if (const ConstantInt *Len =
+      if (const auto *Len =
              dyn_cast<ConstantInt>(findValue(MCI->getLength(),
                                              /*OffsetOk=*/false)))
         if (Len->getValue().isIntN(32))
@@ -324,7 +324,7 @@
       break;
     }
     case Intrinsic::memmove: {
-      MemMoveInst *MMI = cast<MemMoveInst>(&I);
+      auto *MMI = cast<MemMoveInst>(&I);
       // TODO: If the size is known, use it.
       visitMemoryReference(I, MMI->getDest(), MemoryLocation::UnknownSize,
                            MMI->getAlignment(), nullptr, MemRef::Write);
@@ -333,7 +333,7 @@
       break;
     }
     case Intrinsic::memset: {
-      MemSetInst *MSI = cast<MemSetInst>(&I);
+      auto *MSI = cast<MemSetInst>(&I);
       // TODO: If the size is known, use it.
       visitMemoryReference(I, MSI->getDest(), MemoryLocation::UnknownSize,
                            MSI->getAlignment(), nullptr, MemRef::Write);
@@ -411,7 +411,7 @@
            "Unusual: Address one pointer dereference", &I);

   if (Flags & MemRef::Write) {
-    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(UnderlyingObject))
+    if (const auto *GV = dyn_cast<GlobalVariable>(UnderlyingObject))
       Assert(!GV->isConstant(), "Undefined behavior: Write to read-only memory",
              &I);
     Assert(!isa<Function>(UnderlyingObject) &&
@@ -445,14 +445,14 @@
     uint64_t BaseSize = MemoryLocation::UnknownSize;
     unsigned BaseAlign = 0;

-    if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
+    if (auto *AI = dyn_cast<AllocaInst>(Base)) {
       Type *ATy = AI->getAllocatedType();
       if (!AI->isArrayAllocation() && ATy->isSized())
         BaseSize = DL->getTypeAllocSize(ATy);
       BaseAlign = AI->getAlignment();
       if (BaseAlign == 0 && ATy->isSized())
         BaseAlign = DL->getABITypeAlignment(ATy);
-    } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
+    } else if (auto *GV = dyn_cast<GlobalVariable>(Base)) {
       // If the global may be defined differently in another compilation unit
       // then don't warn about funky memory accesses.
       if (GV->hasDefinitiveInitializer()) {
@@ -505,21 +505,21 @@
 }

 void Lint::visitLShr(BinaryOperator &I) {
-  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getOperand(1),
+  if (auto *CI = dyn_cast<ConstantInt>(findValue(I.getOperand(1),
                                                         /*OffsetOk=*/false)))
     Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
            "Undefined result: Shift count out of range", &I);
 }

 void Lint::visitAShr(BinaryOperator &I) {
-  if (ConstantInt *CI =
+  if (auto *CI =
           dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
     Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
            "Undefined result: Shift count out of range", &I);
 }

 void Lint::visitShl(BinaryOperator &I) {
-  if (ConstantInt *CI =
+  if (auto *CI =
          dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
     Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
            "Undefined result: Shift count out of range", &I);
@@ -531,7 +531,7 @@
   if (isa<UndefValue>(V))
     return true;

-  VectorType *VecTy = dyn_cast<VectorType>(V->getType());
+  auto *VecTy = dyn_cast<VectorType>(V->getType());
   if (!VecTy) {
     unsigned BitWidth = V->getType()->getIntegerBitWidth();
     APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
@@ -541,7 +541,7 @@
   }

   // Per-component check doesn't work with zeroinitializer
-  Constant *C = dyn_cast<Constant>(V);
+  auto *C = dyn_cast<Constant>(V);
   if (!C)
     return false;

@@ -608,14 +608,14 @@
 }

 void Lint::visitExtractElementInst(ExtractElementInst &I) {
-  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getIndexOperand(),
+  if (auto *CI = dyn_cast<ConstantInt>(findValue(I.getIndexOperand(),
                                                         /*OffsetOk=*/false)))
     Assert(CI->getValue().ult(I.getVectorOperandType()->getNumElements()),
            "Undefined result: extractelement index out of range", &I);
 }

 void Lint::visitInsertElementInst(InsertElementInst &I) {
-  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getOperand(2),
+  if (auto *CI = dyn_cast<ConstantInt>(findValue(I.getOperand(2),
                                                         /*OffsetOk=*/false)))
     Assert(CI->getValue().ult(I.getType()->getNumElements()),
            "Undefined result: insertelement index out of range", &I);
@@ -655,7 +655,7 @@
   // TODO: Look through calls with unique return values.
   // TODO: Look through vector insert/extract/shuffle.
   V = OffsetOk ? GetUnderlyingObject(V, *DL) : V->stripPointerCasts();
-  if (LoadInst *L = dyn_cast<LoadInst>(V)) {
+  if (auto *L = dyn_cast<LoadInst>(V)) {
     BasicBlock::iterator BBI = L->getIterator();
     BasicBlock *BB = L->getParent();
     SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
@@ -670,19 +670,19 @@
       if (!BB) break;
       BBI = BB->end();
     }
-  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
+  } else if (auto *PN = dyn_cast<PHINode>(V)) {
     if (Value *W = PN->hasConstantValue())
       if (W != V)
         return findValueImpl(W, OffsetOk, Visited);
-  } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
+  } else if (auto *CI = dyn_cast<CastInst>(V)) {
     if (CI->isNoopCast(*DL))
       return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
-  } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
+  } else if (auto *Ex = dyn_cast<ExtractValueInst>(V)) {
     if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
                                      Ex->getIndices()))
       if (W != V) return findValueImpl(W, OffsetOk, Visited);
-  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+  } else if (auto *CE = dyn_cast<ConstantExpr>(V)) {
     // Same as above, but for ConstantExpr instead of Instruction.
     if (Instruction::isCast(CE->getOpcode())) {
       if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
@@ -698,7 +698,7 @@
   }

   // As a last resort, try SimplifyInstruction or constant folding.
-  if (Instruction *Inst = dyn_cast<Instruction>(V)) {
+  if (auto *Inst = dyn_cast<Instruction>(V)) {
     if (Value *W = SimplifyInstruction(Inst, *DL, TLI, DT, AC))
       return findValueImpl(W, OffsetOk, Visited);
   } else if (auto *C = dyn_cast<Constant>(V)) {
Index: lib/Analysis/Loads.cpp
===================================================================
--- lib/Analysis/Loads.cpp
+++ lib/Analysis/Loads.cpp
@@ -63,7 +63,7 @@
   //   malloc may return null.

   // bitcast instructions are no-ops as far as dereferenceability is concerned.
-  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
+  if (const auto *BC = dyn_cast<BitCastOperator>(V))
     return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
                                               DL, CtxI, DT, Visited);

@@ -77,7 +77,7 @@
   }

   // For GEPs, determine if the indexing lands within the allocated object.
-  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+  if (const auto *GEP = dyn_cast<GEPOperator>(V)) {
     const Value *Base = GEP->getPointerOperand();

     APInt Offset(DL.getPointerTypeSizeInBits(GEP->getType()), 0);
@@ -99,11 +99,11 @@
   }

   // For gc.relocate, look through relocations
-  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
+  if (const auto *RelocateInst = dyn_cast<GCRelocateInst>(V))
     return isDereferenceableAndAlignedPointer(
         RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);

-  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
+  if (const auto *ASC = dyn_cast<AddrSpaceCastInst>(V))
     return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
                                               DL, CtxI, DT, Visited);

@@ -169,7 +169,7 @@
   // value or one of them will have an undefined value.
   if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
       isa<GetElementPtrInst>(A))
-    if (const Instruction *BI = dyn_cast<Instruction>(B))
+    if (const auto *BI = dyn_cast<Instruction>(B))
       if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
         return true;

@@ -211,11 +211,11 @@
   Type *BaseType = nullptr;
   unsigned BaseAlign = 0;
-  if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
+  if (const auto *AI = dyn_cast<AllocaInst>(Base)) {
     // An alloca is safe to load from as load as it is suitably aligned.
     BaseType = AI->getAllocatedType();
     BaseAlign = AI->getAlignment();
-  } else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
+  } else if (const auto *GV = dyn_cast<GlobalVariable>(Base)) {
     // Global variables are not necessarily safe to load from if they are
     // interposed arbitrarily. Their size may change or they may be weak and
     // require a test to determine if they were in fact provided.
@@ -225,7 +225,7 @@
     }
   }

-  PointerType *AddrTy = cast<PointerType>(V->getType());
+  auto *AddrTy = cast<PointerType>(V->getType());
   uint64_t LoadSize = DL.getTypeStoreSize(AddrTy->getElementType());

   // If we found a base allocated type from either an alloca or global variable,
@@ -270,10 +270,10 @@
     Value *AccessedPtr;
     unsigned AccessedAlign;
-    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
+    if (auto *LI = dyn_cast<LoadInst>(BBI)) {
       AccessedPtr = LI->getPointerOperand();
       AccessedAlign = LI->getAlignment();
-    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
+    } else if (auto *SI = dyn_cast<StoreInst>(BBI)) {
       AccessedPtr = SI->getPointerOperand();
       AccessedAlign = SI->getAlignment();
     } else
@@ -352,7 +352,7 @@
     // If this is a load of Ptr, the loaded value is available.
     // (This is true even if the load is volatile or atomic, although
     // those cases are unlikely.)
-    if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
+    if (auto *LI = dyn_cast<LoadInst>(Inst))
       if (AreEquivalentAddressValues(
              LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
@@ -367,7 +367,7 @@
         return LI;
       }

-    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+    if (auto *SI = dyn_cast<StoreInst>(Inst)) {
       Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
       // If this is a store through Ptr, the value is available!
       // (This is true even if the store is volatile or atomic, although
Index: lib/Analysis/LoopAccessAnalysis.cpp
===================================================================
--- lib/Analysis/LoopAccessAnalysis.cpp
+++ lib/Analysis/LoopAccessAnalysis.cpp
@@ -221,7 +221,7 @@
   if (SE->isLoopInvariant(Sc, Lp))
     ScStart = ScEnd = Sc;
   else {
-    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
+    const auto *AR = dyn_cast<SCEVAddRecExpr>(Sc);
     assert(AR && "Invalid addrec expression");
     const SCEV *Ex = PSE.getBackedgeTakenCount();

@@ -288,7 +288,7 @@
 static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                    ScalarEvolution *SE) {
   const SCEV *Diff = SE->getMinusSCEV(J, I);
-  const SCEVConstant *C = dyn_cast<SCEVConstant>(Diff);
+  const auto *C = dyn_cast<SCEVConstant>(Diff);

   if (!C)
     return nullptr;
@@ -626,7 +626,7 @@
   if (PSE.getSE()->isLoopInvariant(PtrScev, L))
     return true;

-  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
+  const auto *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
   if (!AR)
     return false;

@@ -877,7 +877,7 @@
 }

 static bool isInBoundsGep(Value *Ptr) {
-  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
+  if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr))
     return GEP->isInBounds();
   return false;
 }
@@ -947,7 +947,7 @@

   const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);

-  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
+  const auto *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
   if (Assume && !AR)
     AR = PSE.getAsAddRec(Ptr);

@@ -995,7 +995,7 @@
   const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

   // Calculate the pointer stride and check if it is constant.
-  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
+  const auto *C = dyn_cast<SCEVConstant>(Step);
   if (!C) {
     DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr <<
           " SCEV: " << *AR << "\n");
@@ -1051,9 +1051,9 @@
 /// Take the address space operand from the Load/Store instruction.
 /// Returns -1 if this is not a valid Load/Store instruction.
 static unsigned getAddressSpaceOperand(Value *I) {
-  if (LoadInst *L = dyn_cast<LoadInst>(I))
+  if (auto *L = dyn_cast<LoadInst>(I))
     return L->getPointerAddressSpace();
-  if (StoreInst *S = dyn_cast<StoreInst>(I))
+  if (auto *S = dyn_cast<StoreInst>(I))
     return S->getPointerAddressSpace();
   return -1;
 }
@@ -1090,7 +1090,7 @@
   const SCEV *OffsetSCEVA = SE.getConstant(OffsetA);
   const SCEV *OffsetSCEVB = SE.getConstant(OffsetB);
   const SCEV *OffsetDeltaSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA);
-  const SCEVConstant *OffsetDeltaC = dyn_cast<SCEVConstant>(OffsetDeltaSCEV);
+  const auto *OffsetDeltaC = dyn_cast<SCEVConstant>(OffsetDeltaSCEV);
   const APInt &OffsetDelta = OffsetDeltaC->getAPInt();
   // Check if they are based on the same pointer. That makes the offsets
   // sufficient.
@@ -1292,7 +1292,7 @@
     return Dependence::Unknown;
   }

-  const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
+  const auto *C = dyn_cast<SCEVConstant>(Dist);
   if (!C) {
     DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
     ShouldRetryWithRuntimeCheck = true;
@@ -1843,7 +1843,7 @@
                                    Instruction *Loc) {
   if (FirstInst)
     return FirstInst;
-  if (Instruction *I = dyn_cast<Instruction>(V))
+  if (auto *I = dyn_cast<Instruction>(V))
     return I->getParent() == Loc->getParent() ? I : nullptr;
   return nullptr;
 }
@@ -1881,7 +1881,7 @@
                  << "\n");
     // Ptr could be in the loop body. If so, expand a new one at the correct
     // location.
-    Instruction *Inst = dyn_cast<Instruction>(Ptr);
+    auto *Inst = dyn_cast<Instruction>(Ptr);
     Value *NewPtr = (Inst && TheLoop->contains(Inst)) ?
 Exp.expandCodeFor(Sc, PtrArithTy, Loc) : Ptr;
@@ -1998,9 +1998,9 @@
 void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
   Value *Ptr = nullptr;
-  if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
+  if (auto *LI = dyn_cast<LoadInst>(MemAccess))
     Ptr = LI->getPointerOperand();
-  else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
+  else if (auto *SI = dyn_cast<StoreInst>(MemAccess))
     Ptr = SI->getPointerOperand();
   else
     return;
Index: lib/Analysis/LoopInfo.cpp
===================================================================
--- lib/Analysis/LoopInfo.cpp
+++ lib/Analysis/LoopInfo.cpp
@@ -53,7 +53,7 @@
 //
 bool Loop::isLoopInvariant(const Value *V) const {
-  if (const Instruction *I = dyn_cast<Instruction>(V))
+  if (const auto *I = dyn_cast<Instruction>(V))
     return !contains(I);
   return true; // All non-instructions are loop invariant
 }
@@ -64,7 +64,7 @@

 bool Loop::makeLoopInvariant(Value *V, bool &Changed,
                              Instruction *InsertPt) const {
-  if (Instruction *I = dyn_cast<Instruction>(V))
+  if (auto *I = dyn_cast<Instruction>(V))
     return makeLoopInvariant(I, Changed, InsertPt);
   return true; // All non-instructions are loop-invariant.
 }
@@ -128,15 +128,15 @@

   // Loop over all of the PHI nodes, looking for a canonical indvar.
   for (BasicBlock::iterator I = H->begin(); isa<PHINode>(I); ++I) {
-    PHINode *PN = cast<PHINode>(I);
-    if (ConstantInt *CI =
+    auto *PN = cast<PHINode>(I);
+    if (auto *CI =
             dyn_cast<ConstantInt>(PN->getIncomingValueForBlock(Incoming)))
       if (CI->isNullValue())
-        if (Instruction *Inc =
+        if (auto *Inc =
                dyn_cast<Instruction>(PN->getIncomingValueForBlock(Backedge)))
          if (Inc->getOpcode() == Instruction::Add &&
                Inc->getOperand(0) == PN)
-            if (ConstantInt *CI = dyn_cast<ConstantInt>(Inc->getOperand(1)))
+            if (auto *CI = dyn_cast<ConstantInt>(Inc->getOperand(1)))
              if (CI->equalsInt(1))
                return PN;
   }
@@ -154,9 +154,9 @@
       continue;

     for (const Use &U : I.uses()) {
-      const Instruction *UI = cast<Instruction>(U.getUser());
+      const auto *UI = cast<Instruction>(U.getUser());
       const BasicBlock *UserBB = UI->getParent();
-      if (const PHINode *P = dyn_cast<PHINode>(UI))
+      if (const auto *P = dyn_cast<PHINode>(UI))
         UserBB = P->getIncomingBlock(U);

       // Check the current block, as a fast-path, before checking whether
@@ -316,7 +316,7 @@
   // and if there is a second DebugLoc in the header we use it as end location
   // of the loop.
   for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
-    if (DILocation *L = dyn_cast<DILocation>(LoopID->getOperand(i))) {
+    if (auto *L = dyn_cast<DILocation>(LoopID->getOperand(i))) {
       if (!Start)
         Start = DebugLoc(L);
       else
Index: lib/Analysis/LoopPass.cpp
===================================================================
--- lib/Analysis/LoopPass.cpp
+++ lib/Analysis/LoopPass.cpp
@@ -109,7 +109,7 @@

 /// deleteSimpleAnalysisValue - Invoke deleteAnalysisValue hook for all passes.
 void LPPassManager::deleteSimpleAnalysisValue(Value *V, Loop *L) {
-  if (BasicBlock *BB = dyn_cast<BasicBlock>(V)) {
+  if (auto *BB = dyn_cast<BasicBlock>(V)) {
     for (Instruction &I : *BB) {
       deleteSimpleAnalysisValue(&I, L);
     }
Index: lib/Analysis/LoopUnrollAnalyzer.cpp
===================================================================
--- lib/Analysis/LoopUnrollAnalyzer.cpp
+++ lib/Analysis/LoopUnrollAnalyzer.cpp
@@ -84,7 +84,7 @@
   else
     SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);

-  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
+  if (auto *C = dyn_cast_or_null<Constant>(SimpleV))
     SimplifiedValues[&I] = C;

   if (SimpleV)
@@ -107,7 +107,7 @@
   if (!GV || !GV->hasDefinitiveInitializer() || !GV->isConstant())
     return false;

-  ConstantDataSequential *CDS =
+  auto *CDS =
       dyn_cast<ConstantDataSequential>(GV->getInitializer());
   if (!CDS)
     return false;
@@ -144,7 +144,7 @@
 /// Try to simplify cast instruction.
 bool UnrolledInstAnalyzer::visitCastInst(CastInst &I) {
   // Propagate constants through casts.
-  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
+  auto *COp = dyn_cast<Constant>(I.getOperand(0));
   if (!COp)
     COp = SimplifiedValues.lookup(I.getOperand(0));

@@ -191,8 +191,8 @@
     }
   }

-  if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
-    if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
+  if (auto *CLHS = dyn_cast<Constant>(LHS)) {
+    if (auto *CRHS = dyn_cast<Constant>(RHS)) {
       if (CLHS->getType() == CRHS->getType()) {
         if (Constant *C = ConstantExpr::getCompare(I.getPredicate(), CLHS, CRHS)) {
           SimplifiedValues[&I] = C;
Index: lib/Analysis/MemDerefPrinter.cpp
===================================================================
--- lib/Analysis/MemDerefPrinter.cpp
+++ lib/Analysis/MemDerefPrinter.cpp
@@ -54,7 +54,7 @@
 bool MemDerefPrinter::runOnFunction(Function &F) {
   const DataLayout &DL = F.getParent()->getDataLayout();
   for (auto &I: instructions(F)) {
-    if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
+    if (auto *LI = dyn_cast<LoadInst>(&I)) {
       Value *PO = LI->getPointerOperand();
       if (isDereferenceablePointer(PO, DL))
         Deref.push_back(PO);
Index: lib/Analysis/MemoryBuiltins.cpp
===================================================================
--- lib/Analysis/MemoryBuiltins.cpp
+++ lib/Analysis/MemoryBuiltins.cpp
@@ -229,7 +229,7 @@
     return nullptr;

   unsigned ElementSize = DL.getTypeAllocSize(T);
-  if (StructType *ST = dyn_cast<StructType>(T))
+  if (auto *ST = dyn_cast<StructType>(T))
     ElementSize = DL.getStructLayout(ST)->getSizeInBytes();

   // If malloc call's arg can be determined to be a multiple of ElementSize,
@@ -257,7 +257,7 @@
   // Determine if CallInst has a bitcast use.
   for (Value::const_user_iterator UI = CI->user_begin(), E = CI->user_end();
        UI != E;)
-    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(*UI++)) {
+    if (const auto *BCI = dyn_cast<BitCastInst>(*UI++)) {
      MallocType = cast<PointerType>(BCI->getDestTy());
      NumOfBitCastUses++;
    }
@@ -308,7 +308,7 @@
 /// isFreeCall - Returns non-null if the value is a call to the builtin free()
 const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
-  const CallInst *CI = dyn_cast<CallInst>(I);
+  const auto *CI = dyn_cast<CallInst>(I);
   if (!CI || isa<IntrinsicInst>(CI))
     return nullptr;
   Function *Callee = CI->getCalledFunction();
@@ -416,27 +416,27 @@
   Zero = APInt::getNullValue(IntTyBits);

   V = V->stripPointerCasts();
-  if (Instruction *I = dyn_cast<Instruction>(V)) {
+  if (auto *I = dyn_cast<Instruction>(V)) {
     // If we have already seen this instruction, bail out. Cycles can happen in
     // unreachable code after constant propagation.
     if (!SeenInsts.insert(I).second)
       return unknown();

-    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
+    if (auto *GEP = dyn_cast<GEPOperator>(V))
       return visitGEPOperator(*GEP);
     return visit(*I);
   }
-  if (Argument *A = dyn_cast<Argument>(V))
+  if (auto *A = dyn_cast<Argument>(V))
     return visitArgument(*A);
-  if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
+  if (auto *P = dyn_cast<ConstantPointerNull>(V))
     return visitConstantPointerNull(*P);
-  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
+  if (auto *GA = dyn_cast<GlobalAlias>(V))
     return visitGlobalAlias(*GA);
-  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+  if (auto *GV = dyn_cast<GlobalVariable>(V))
     return visitGlobalVariable(*GV);
-  if (UndefValue *UV = dyn_cast<UndefValue>(V))
+  if (auto *UV = dyn_cast<UndefValue>(V))
     return visitUndefValue(*UV);
-  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+  if (auto *CE = dyn_cast<ConstantExpr>(V)) {
     if (CE->getOpcode() == Instruction::IntToPtr)
       return unknown(); // clueless
     if (CE->getOpcode() == Instruction::GetElementPtr)
@@ -457,7 +457,7 @@
     return std::make_pair(align(Size, I.getAlignment()), Zero);

   Value *ArraySize = I.getArraySize();
-  if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
+  if (const auto *C = dyn_cast<ConstantInt>(ArraySize)) {
     Size *= C->getValue().zextOrSelf(IntTyBits);
     return std::make_pair(align(Size, I.getAlignment()), Zero);
   }
@@ -470,7 +470,7 @@
     ++ObjectVisitorArgument;
     return unknown();
   }
-  PointerType *PT = cast<PointerType>(A.getType());
+  auto *PT = cast<PointerType>(A.getType());
   APInt Size(IntTyBits, DL.getTypeAllocSize(PT->getElementType()));
   return std::make_pair(align(Size, A.getParamAlignment()), Zero);
 }
@@ -489,7 +489,7 @@

   // Strndup limits strlen.
   if (FnData->FstParam > 0) {
-    ConstantInt *Arg =
+    auto *Arg =
        dyn_cast<ConstantInt>(CS.getArgument(FnData->FstParam));
     if (!Arg)
       return unknown();
@@ -501,7 +501,7 @@
     return std::make_pair(Size, Zero);
   }

-  ConstantInt *Arg = dyn_cast<ConstantInt>(CS.getArgument(FnData->FstParam));
+  auto *Arg = dyn_cast<ConstantInt>(CS.getArgument(FnData->FstParam));
   if (!Arg)
     return unknown();

@@ -689,7 +689,7 @@
   // Always generate code immediately before the instruction being
   // processed, so that the generated code dominates the same BBs.
   BuilderTy::InsertPointGuard Guard(Builder);
-  if (Instruction *I = dyn_cast<Instruction>(V))
+  if (auto *I = dyn_cast<Instruction>(V))
     Builder.SetInsertPoint(I);

   // Now compute the size and offset.
@@ -700,9 +700,9 @@
   // can occur in dead code.
   if (!SeenVals.insert(V).second) {
     Result = unknown();
-  } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+  } else if (auto *GEP = dyn_cast<GEPOperator>(V)) {
     Result = visitGEPOperator(*GEP);
-  } else if (Instruction *I = dyn_cast<Instruction>(V)) {
+  } else if (auto *I = dyn_cast<Instruction>(V)) {
     Result = visit(*I);
   } else if (isa<Argument>(V) ||
              (isa<ConstantExpr>(V) &&
Index: lib/Analysis/MemoryDependenceAnalysis.cpp
===================================================================
--- lib/Analysis/MemoryDependenceAnalysis.cpp
+++ lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -102,7 +102,7 @@
 /// instruction.
 static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
                               const TargetLibraryInfo &TLI) {
-  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+  if (const auto *LI = dyn_cast<LoadInst>(Inst)) {
     if (LI->isUnordered()) {
       Loc = MemoryLocation::get(LI);
       return MRI_Ref;
@@ -115,7 +115,7 @@
     return MRI_ModRef;
   }

-  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+  if (const auto *SI = dyn_cast<StoreInst>(Inst)) {
     if (SI->isUnordered()) {
       Loc = MemoryLocation::get(SI);
       return MRI_Mod;
@@ -128,7 +128,7 @@
     return MRI_ModRef;
   }

-  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
+  if (const auto *V = dyn_cast<VAArgInst>(Inst)) {
     Loc = MemoryLocation::get(V);
     return MRI_ModRef;
   }
@@ -139,7 +139,7 @@
     return MRI_Mod;
   }

-  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
+  if (const auto *II = dyn_cast<IntrinsicInst>(Inst)) {
     AAMDNodes AAInfo;

     switch (II->getIntrinsicID()) {
@@ -310,11 +310,11 @@
 }

 static bool isVolatile(Instruction *Inst) {
-  if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
+  if (auto *LI = dyn_cast<LoadInst>(Inst))
     return LI->isVolatile();
-  else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
+  else if (auto *SI = dyn_cast<StoreInst>(Inst))
     return SI->isVolatile();
-  else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
+  else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
     return AI->isVolatile();
   return false;
 }
@@ -431,7 +431,7 @@
   // forwarding, but any mayalias write can be assumed to be noalias.
   // Arguably, this logic should be pushed inside AliasAnalysis itself.
   if (isLoad && QueryInst) {
-    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
+    auto *LI = dyn_cast<LoadInst>(QueryInst);
     if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
       isInvariantLoad = true;
   }
@@ -464,7 +464,7 @@
   while (ScanIt != BB->begin()) {
     Instruction *Inst = &*--ScanIt;
-    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
+    if (auto *II = dyn_cast<IntrinsicInst>(Inst))
       // Debug intrinsics don't (and can't) cause dependencies.
       if (isa<DbgInfoIntrinsic>(II))
         continue;
@@ -475,7 +475,7 @@
     if (!*Limit)
       return MemDepResult::getUnknown();

-    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
+    if (auto *II = dyn_cast<IntrinsicInst>(Inst)) {
       // If we reach a lifetime begin or end marker, then the query ends here
       // because the value is undefined.
       if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
@@ -494,7 +494,7 @@
     // One exception is atomic loads: a value can depend on an atomic load that
     // it does not alias with when this atomic load indicates that another
     // thread may be accessing the location.
-    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+    if (auto *LI = dyn_cast<LoadInst>(Inst)) {

       // While volatile access cannot be eliminated, they do not have to clobber
       // non-aliasing locations, as normal accesses, for example, can be safely
@@ -562,7 +562,7 @@
       return MemDepResult::getDef(Inst);
     }

-    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+    if (auto *SI = dyn_cast<StoreInst>(Inst)) {
       // Atomic stores have complications involved.
       // A Monotonic store is OK if the query inst is itself not atomic.
       // FIXME: This is overly conservative.
@@ -625,7 +625,7 @@
     // fence. As a result, we look past it when finding a dependency for
     // loads. DSE uses this to find preceeding stores to delete and thus we
     // can't bypass the fence if the query instruction is a store.
-    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
+    if (auto *FI = dyn_cast<FenceInst>(Inst))
       if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
         continue;

@@ -878,9 +878,9 @@
   // atomic instructions which are unordered.
   // TODO: Handle ordered instructions
   auto isOrdered = [](Instruction *Inst) {
-    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+    if (auto *LI = dyn_cast<LoadInst>(Inst)) {
       return !LI->isUnordered();
-    } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+    } else if (auto *SI = dyn_cast<StoreInst>(Inst)) {
       return !SI->isUnordered();
     }
     return false;
Index: lib/Analysis/MemoryLocation.cpp
===================================================================
--- lib/Analysis/MemoryLocation.cpp
+++ lib/Analysis/MemoryLocation.cpp
@@ -66,7 +66,7 @@

 MemoryLocation MemoryLocation::getForSource(const MemTransferInst *MTI) {
   uint64_t Size = UnknownSize;
-  if (ConstantInt *C = dyn_cast<ConstantInt>(MTI->getLength()))
+  if (auto *C = dyn_cast<ConstantInt>(MTI->getLength()))
     Size = C->getValue().getZExtValue();

   // memcpy/memmove can have AA tags. For memcpy, they apply
@@ -79,7 +79,7 @@

 MemoryLocation MemoryLocation::getForDest(const MemIntrinsic *MTI) {
   uint64_t Size = UnknownSize;
-  if (ConstantInt *C = dyn_cast<ConstantInt>(MTI->getLength()))
+  if (auto *C = dyn_cast<ConstantInt>(MTI->getLength()))
     Size = C->getValue().getZExtValue();

   // memcpy/memmove can have AA tags. For memcpy, they apply
@@ -98,7 +98,7 @@
   const Value *Arg = CS.getArgument(ArgIdx);

   // We may be able to produce an exact size for known intrinsics.
-  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
+  if (const auto *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
     const DataLayout &DL = II->getModule()->getDataLayout();

     switch (II->getIntrinsicID()) {
@@ -109,7 +109,7 @@
     case Intrinsic::memmove:
       assert((ArgIdx == 0 || ArgIdx == 1) &&
              "Invalid argument index for memory intrinsic");
-      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
+      if (auto *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
         return MemoryLocation(Arg, LenCI->getZExtValue(), AATags);
       break;

@@ -149,7 +149,7 @@
              "Invalid argument index for memset_pattern16");
       if (ArgIdx == 1)
         return MemoryLocation(Arg, 16, AATags);
-      if (const ConstantInt *LenCI = dyn_cast<ConstantInt>(CS.getArgument(2)))
+      if (const auto *LenCI = dyn_cast<ConstantInt>(CS.getArgument(2)))
         return MemoryLocation(Arg, LenCI->getZExtValue(), AATags);
     }
     // FIXME: Handle memset_pattern4 and memset_pattern8 also.
Index: lib/Analysis/ModuleSummaryAnalysis.cpp
===================================================================
--- lib/Analysis/ModuleSummaryAnalysis.cpp
+++ lib/Analysis/ModuleSummaryAnalysis.cpp
@@ -48,7 +48,7 @@
   ImmutableCallSite CS(U);

   for (const auto &OI : U->operands()) {
-    const User *Operand = dyn_cast<User>(OI);
+    const auto *Operand = dyn_cast<User>(OI);
     if (!Operand)
       continue;
     if (isa<BlockAddress>(Operand))
Index: lib/Analysis/ObjCARCInstKind.cpp
===================================================================
--- lib/Analysis/ObjCARCInstKind.cpp
+++ lib/Analysis/ObjCARCInstKind.cpp
@@ -98,7 +98,7 @@
   const Argument *A0 = &*AI++;
   if (AI == AE) {
     // Argument is a pointer.
-    PointerType *PTy = dyn_cast<PointerType>(A0->getType());
+    auto *PTy = dyn_cast<PointerType>(A0->getType());
     if (!PTy)
       return ARCInstKind::CallOrUser;

@@ -126,7 +126,7 @@
         .Default(ARCInstKind::CallOrUser);

    // Argument is i8**
-    if (PointerType *Pte = dyn_cast<PointerType>(ETy))
+    if (auto *Pte = dyn_cast<PointerType>(ETy))
      if (Pte->getElementType()->isIntegerTy(8))
        return StringSwitch<ARCInstKind>(F->getName())
            .Case("objc_loadWeakRetained", ARCInstKind::LoadWeakRetained)
@@ -141,10 +141,10 @@
   // Two arguments, first is i8**.
   const Argument *A1 = &*AI++;
   if (AI == AE)
-    if (PointerType *PTy = dyn_cast<PointerType>(A0->getType()))
-      if (PointerType *Pte = dyn_cast<PointerType>(PTy->getElementType()))
+    if (auto *PTy = dyn_cast<PointerType>(A0->getType()))
+      if (auto *Pte = dyn_cast<PointerType>(PTy->getElementType()))
         if (Pte->getElementType()->isIntegerTy(8))
-          if (PointerType *PTy1 = dyn_cast<PointerType>(A1->getType())) {
+          if (auto *PTy1 = dyn_cast<PointerType>(A1->getType())) {
             Type *ETy1 = PTy1->getElementType();
             // Second argument is i8*
             if (ETy1->isIntegerTy(8))
@@ -154,7 +154,7 @@
                   .Case("objc_storeStrong", ARCInstKind::StoreStrong)
                   .Default(ARCInstKind::CallOrUser);
             // Second argument is i8**.
-            if (PointerType *Pte1 = dyn_cast<PointerType>(ETy1))
+            if (auto *Pte1 = dyn_cast<PointerType>(ETy1))
               if (Pte1->getElementType()->isIntegerTy(8))
                 return StringSwitch<ARCInstKind>(F->getName())
                     .Case("objc_moveWeak", ARCInstKind::MoveWeak)
@@ -235,7 +235,7 @@

 /// \brief Determine what kind of construct V is.
 ARCInstKind llvm::objcarc::GetARCInstKind(const Value *V) {
-  if (const Instruction *I = dyn_cast<Instruction>(V)) {
+  if (const auto *I = dyn_cast<Instruction>(V)) {
     // Any instruction other than bitcast and gep with a pointer operand have a
     // use of an objc pointer. Bitcasts, GEPs, Selects, PHIs transfer a pointer
     // to a subsequent use, rather than using it themselves, in this sense.
@@ -244,7 +244,7 @@
     // not interesting to examine.
     switch (I->getOpcode()) {
     case Instruction::Call: {
-      const CallInst *CI = cast<CallInst>(I);
+      const auto *CI = cast<CallInst>(I);
       // See if we have a function that we know something about.
       if (const Function *F = CI->getCalledFunction()) {
         ARCInstKind Class = GetFunctionClass(F);
Index: lib/Analysis/PHITransAddr.cpp
===================================================================
--- lib/Analysis/PHITransAddr.cpp
+++ lib/Analysis/PHITransAddr.cpp
@@ -57,7 +57,7 @@
 static bool VerifySubExpr(Value *Expr,
                           SmallVectorImpl<Instruction*> &InstInputs) {
   // If this is a non-instruction value, there is nothing to do.
-  Instruction *I = dyn_cast<Instruction>(Expr);
+  auto *I = dyn_cast<Instruction>(Expr);
   if (!I) return true;

   // If it's an instruction, it is either in Tmp or its operands recursively
@@ -114,14 +114,14 @@
 bool PHITransAddr::IsPotentiallyPHITranslatable() const {
   // If the input value is not an instruction, or if it is not defined in CurBB,
   // then we don't need to phi translate it.
-  Instruction *Inst = dyn_cast<Instruction>(Addr);
+  auto *Inst = dyn_cast<Instruction>(Addr);
   return !Inst || CanPHITrans(Inst);
 }

 static void RemoveInstInputs(Value *V,
                              SmallVectorImpl<Instruction*> &InstInputs) {
-  Instruction *I = dyn_cast<Instruction>(V);
+  auto *I = dyn_cast<Instruction>(V);
   if (!I) return;

   // If the instruction is in the InstInputs list, remove it.
@@ -135,7 +135,7 @@

   // Otherwise, it must have instruction inputs itself.  Zap them recursively.
   for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
-    if (Instruction *Op = dyn_cast<Instruction>(I->getOperand(i)))
+    if (auto *Op = dyn_cast<Instruction>(I->getOperand(i)))
       RemoveInstInputs(Op, InstInputs);
   }
 }
@@ -144,7 +144,7 @@
                                          BasicBlock *PredBB,
                                          const DominatorTree *DT) {
   // If this is a non-instruction value, it can't require PHI translation.
-  Instruction *Inst = dyn_cast<Instruction>(V);
+  auto *Inst = dyn_cast<Instruction>(V);
   if (!Inst) return V;

   // Determine whether 'Inst' is an input to our PHI translatable expression.
@@ -165,7 +165,7 @@
     InstInputs.erase(find(InstInputs, Inst));

   // If this is a PHI, go ahead and translate it.
-  if (PHINode *PN = dyn_cast<PHINode>(Inst))
+  if (auto *PN = dyn_cast<PHINode>(Inst))
     return AddAsInput(PN->getIncomingValueForBlock(PredBB));

   // If this is a non-phi value, and it is analyzable, we can incorporate it
@@ -176,7 +176,7 @@
   // All instruction operands are now inputs (and of course, they may also be
   // defined in this block, so they may need to be phi translated themselves.
   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
-    if (Instruction *Op = dyn_cast<Instruction>(Inst->getOperand(i)))
+    if (auto *Op = dyn_cast<Instruction>(Inst->getOperand(i)))
       InstInputs.push_back(Op);
   }

@@ -184,7 +184,7 @@
   // or because we just incorporated it into the expression).  See if its
   // operands need to be phi translated, and if so, reconstruct it.

-  if (CastInst *Cast = dyn_cast<CastInst>(Inst)) {
+  if (auto *Cast = dyn_cast<CastInst>(Inst)) {
     if (!isSafeToSpeculativelyExecute(Cast)) return nullptr;
     Value *PHIIn = PHITranslateSubExpr(Cast->getOperand(0), CurBB, PredBB, DT);
     if (!PHIIn) return nullptr;
@@ -194,14 +194,14 @@

     // Find an available version of this cast.

     // Constants are trivial to find.
-    if (Constant *C = dyn_cast<Constant>(PHIIn))
+    if (auto *C = dyn_cast<Constant>(PHIIn))
       return AddAsInput(ConstantExpr::getCast(Cast->getOpcode(),
                                               C, Cast->getType()));

     // Otherwise we have to see if a casted version of the incoming pointer
     // is available.  If so, we can use it, otherwise we have to fail.
     for (User *U : PHIIn->users()) {
-      if (CastInst *CastI = dyn_cast<CastInst>(U))
+      if (auto *CastI = dyn_cast<CastInst>(U))
         if (CastI->getOpcode() == Cast->getOpcode() &&
             CastI->getType() == Cast->getType() &&
             (!DT || DT->dominates(CastI->getParent(), PredBB)))
@@ -211,7 +211,7 @@
   }

   // Handle getelementptr with at least one PHI translatable operand.
-  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
+  if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
     SmallVector<Value*, 8> GEPOps;
     bool AnyChanged = false;
     for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
@@ -237,7 +237,7 @@
     // Scan to see if we have this GEP available.
     Value *APHIOp = GEPOps[0];
     for (User *U : APHIOp->users()) {
-      if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U))
+      if (auto *GEPI = dyn_cast<GetElementPtrInst>(U))
         if (GEPI->getType() == GEP->getType() &&
             GEPI->getNumOperands() == GEPOps.size() &&
             GEPI->getParent()->getParent() == CurBB->getParent() &&
@@ -261,9 +261,9 @@
     if (!LHS) return nullptr;

     // If the PHI translated LHS is an add of a constant, fold the immediates.
-    if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(LHS))
+    if (auto *BOp = dyn_cast<BinaryOperator>(LHS))
       if (BOp->getOpcode() == Instruction::Add)
-        if (ConstantInt *CI = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
+        if (auto *CI = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
          LHS = BOp->getOperand(0);
          RHS = ConstantExpr::getAdd(RHS, CI);
          isNSW = isNUW = false;
@@ -289,7 +289,7 @@

     // Otherwise, see if we have this add available somewhere.
     for (User *U : LHS->users()) {
-      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U))
+      if (auto *BO = dyn_cast<BinaryOperator>(U))
         if (BO->getOpcode() == Instruction::Add &&
             BO->getOperand(0) == LHS && BO->getOperand(1) == RHS &&
             BO->getParent()->getParent() == CurBB->getParent() &&
@@ -323,7 +323,7 @@

   if (MustDominate)
     // Make sure the value is live in the predecessor.
-    if (Instruction *Inst = dyn_cast_or_null<Instruction>(Addr))
+    if (auto *Inst = dyn_cast_or_null<Instruction>(Addr))
       if (!DT->dominates(Inst->getParent(), PredBB))
         Addr = nullptr;

@@ -377,7 +377,7 @@
     return nullptr;

   // Handle cast of PHI translatable value.
-  if (CastInst *Cast = dyn_cast<CastInst>(Inst)) {
+  if (auto *Cast = dyn_cast<CastInst>(Inst)) {
     if (!isSafeToSpeculativelyExecute(Cast)) return nullptr;
     Value *OpVal = InsertPHITranslatedSubExpr(Cast->getOperand(0),
                                               CurBB, PredBB, DT, NewInsts);
@@ -393,7 +393,7 @@
   }

   // Handle getelementptr with at least one PHI operand.
-  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
+  if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
     SmallVector<Value*, 8> GEPOps;
     BasicBlock *CurBB = GEP->getParent();
     for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
Index: lib/Analysis/ScalarEvolution.cpp
===================================================================
--- lib/Analysis/ScalarEvolution.cpp
+++ lib/Analysis/ScalarEvolution.cpp
@@ -152,28 +152,28 @@
     cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
     return;
   case scTruncate: {
-    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
+    const auto *Trunc = cast<SCEVTruncateExpr>(this);
     const SCEV *Op = Trunc->getOperand();
     OS << "(trunc " << *Op->getType() << " " << *Op << " to "
        << *Trunc->getType() << ")";
     return;
   }
   case scZeroExtend: {
-    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
+    const auto *ZExt = cast<SCEVZeroExtendExpr>(this);
     const SCEV *Op = ZExt->getOperand();
     OS << "(zext " << *Op->getType() << " " << *Op << " to "
        << *ZExt->getType() << ")";
     return;
   }
   case scSignExtend: {
-    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
+    const auto *SExt = cast<SCEVSignExtendExpr>(this);
     const SCEV *Op = SExt->getOperand();
     OS << "(sext " << *Op->getType() << " " << *Op << " to "
        << *SExt->getType() << ")";
     return;
   }
   case scAddRecExpr: {
-    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
+    const auto *AR = cast<SCEVAddRecExpr>(this);
     OS << "{" << *AR->getOperand(0);
     for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
       OS << ",+," << *AR->getOperand(i);
@@ -193,7 +193,7 @@
   case scMulExpr:
   case scUMaxExpr:
   case scSMaxExpr: {
-    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
+    const auto *NAry = cast<SCEVNAryExpr>(this);
     const char *OpStr = nullptr;
     switch (NAry->getSCEVType()) {
     case scAddExpr: OpStr = " + "; break;
@@ -220,12 +220,12 @@
     return;
   }
   case scUDivExpr: {
-    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
+    const auto *UDiv = cast<SCEVUDivExpr>(this);
     OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
     return;
   }
   case scUnknown: {
-    const SCEVUnknown *U = cast<SCEVUnknown>(this);
+    const auto *U = cast<SCEVUnknown>(this);
     Type *AllocTy;
     if (U->isSizeOf(AllocTy)) {
       OS << "sizeof(" << *AllocTy << ")";
@@ -282,29 +282,29 @@
 }

 bool SCEV::isZero() const {
-  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
+  if (const auto *SC = dyn_cast<SCEVConstant>(this))
     return SC->getValue()->isZero();
   return false;
 }

 bool SCEV::isOne() const {
-  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
+  if (const auto *SC = dyn_cast<SCEVConstant>(this))
     return SC->getValue()->isOne();
   return false;
 }

 bool SCEV::isAllOnesValue() const {
-  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
+  if (const auto *SC = dyn_cast<SCEVConstant>(this))
     return SC->getValue()->isAllOnesValue();
   return false;
 }

 bool SCEV::isNonConstantNegative() const {
-  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
+  const auto *Mul = dyn_cast<SCEVMulExpr>(this);
   if (!Mul) return false;

   // If there is a constant factor, it will be first.
-  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
+  const auto *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
   if (!SC) return false;

   // Return true if the value is negative, this matches things like (-42 * V).
@@ -335,7 +335,7 @@ const SCEV * ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) { - IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty)); + auto *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty)); return getConstant(ConstantInt::get(ITy, V, isSigned)); } @@ -392,13 +392,13 @@ } bool SCEVUnknown::isSizeOf(Type *&AllocTy) const { - if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) + if (auto *VCE = dyn_cast<ConstantExpr>(getValue())) if (VCE->getOpcode() == Instruction::PtrToInt) - if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) + if (auto *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) if (CE->getOpcode() == Instruction::GetElementPtr && CE->getOperand(0)->isNullValue() && CE->getNumOperands() == 2) - if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1))) + if (auto *CI = dyn_cast<ConstantInt>(CE->getOperand(1))) if (CI->isOne()) { AllocTy = cast<PointerType>(CE->getOperand(0)->getType()) ->getElementType(); @@ -409,18 +409,18 @@ } bool SCEVUnknown::isAlignOf(Type *&AllocTy) const { - if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) + if (auto *VCE = dyn_cast<ConstantExpr>(getValue())) if (VCE->getOpcode() == Instruction::PtrToInt) - if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) + if (auto *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) if (CE->getOpcode() == Instruction::GetElementPtr && CE->getOperand(0)->isNullValue()) { Type *Ty = cast<PointerType>(CE->getOperand(0)->getType())->getElementType(); - if (StructType *STy = dyn_cast<StructType>(Ty)) + if (auto *STy = dyn_cast<StructType>(Ty)) if (!STy->isPacked() && CE->getNumOperands() == 3 && CE->getOperand(1)->isNullValue()) { - if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2))) + if (auto *CI = dyn_cast<ConstantInt>(CE->getOperand(2))) if (CI->isOne() && STy->getNumElements() == 2 && STy->getElementType(0)->isIntegerTy(1)) { @@ -434,9 +434,9 @@ } bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const { - if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue())) + if (auto *VCE = dyn_cast<ConstantExpr>(getValue())) if (VCE->getOpcode() == Instruction::PtrToInt) - if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) + if (auto *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0))) if (CE->getOpcode() == Instruction::GetElementPtr && CE->getNumOperands() == 3 && CE->getOperand(0)->isNullValue() && @@ -575,8 +575,8 @@ // so that (a + b) and (b + a) don't end up as different expressions. switch (static_cast<SCEVTypes>(LType)) { case scUnknown: { - const SCEVUnknown *LU = cast<SCEVUnknown>(LHS); - const SCEVUnknown *RU = cast<SCEVUnknown>(RHS); + const auto *LU = cast<SCEVUnknown>(LHS); + const auto *RU = cast<SCEVUnknown>(RHS); SmallSet<std::pair<Value *, Value *>, 8> EqCache; int X = CompareValueComplexity(EqCache, LI, LU->getValue(), RU->getValue(), @@ -587,8 +587,8 @@ } case scConstant: { - const SCEVConstant *LC = cast<SCEVConstant>(LHS); - const SCEVConstant *RC = cast<SCEVConstant>(RHS); + const auto *LC = cast<SCEVConstant>(LHS); + const auto *RC = cast<SCEVConstant>(RHS); // Compare constant values. const APInt &LA = LC->getAPInt(); @@ -600,8 +600,8 @@ } case scAddRecExpr: { - const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS); - const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS); + const auto *LA = cast<SCEVAddRecExpr>(LHS); + const auto *RA = cast<SCEVAddRecExpr>(RHS); // Compare addrec loop depths. const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop(); @@ -631,8 +631,8 @@ case scMulExpr: case scSMaxExpr: case scUMaxExpr: { - const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS); - const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS); + const auto *LC = cast<SCEVNAryExpr>(LHS); + const auto *RC = cast<SCEVNAryExpr>(RHS); // Lexicographically compare n-ary expressions.
unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands(); @@ -652,8 +652,8 @@ } case scUDivExpr: { - const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS); - const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS); + const auto *LC = cast<SCEVUDivExpr>(LHS); + const auto *RC = cast<SCEVUDivExpr>(RHS); // Lexicographically compare udiv expressions. int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getLHS(), RC->getLHS(), @@ -670,8 +670,8 @@ case scTruncate: case scZeroExtend: case scSignExtend: { - const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS); - const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS); + const auto *LC = cast<SCEVCastExpr>(LHS); + const auto *RC = cast<SCEVCastExpr>(RHS); // Compare cast expressions by operand. int X = CompareSCEVComplexity(EqCacheSCEV, LI, LC->getOperand(), @@ -795,7 +795,7 @@ } // Split the Denominator when it is a product. - if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) { + if (const auto *T = dyn_cast<SCEVMulExpr>(Denominator)) { const SCEV *Q, *R; *Quotient = Numerator; for (const SCEV *Op : T->operands()) { @@ -831,7 +831,7 @@ void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {} void visitConstant(const SCEVConstant *Numerator) { - if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) { + if (const auto *D = dyn_cast<SCEVConstant>(Denominator)) { APInt NumeratorVal = Numerator->getAPInt(); APInt DenominatorVal = D->getAPInt(); uint32_t NumeratorBW = NumeratorVal.getBitWidth(); @@ -1150,25 +1150,25 @@ if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; // Fold if the operand is constant. - if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) + if (const auto *SC = dyn_cast<SCEVConstant>(Op)) return getConstant( cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty))); // trunc(trunc(x)) --> trunc(x) - if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) + if (const auto *ST = dyn_cast<SCEVTruncateExpr>(Op)) return getTruncateExpr(ST->getOperand(), Ty); // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing - if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) + if (const auto *SS = dyn_cast<SCEVSignExtendExpr>(Op)) return getTruncateOrSignExtend(SS->getOperand(), Ty); // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing - if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) + if (const auto *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) return getTruncateOrZeroExtend(SZ->getOperand(), Ty); // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can // eliminate all the truncates, or we replace other casts with truncates. - if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) { + if (const auto *SA = dyn_cast<SCEVAddExpr>(Op)) { SmallVector<const SCEV *, 4> Operands; bool hasTrunc = false; for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) { @@ -1184,7 +1184,7 @@ // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can // eliminate all the truncates, or we replace other casts with truncates. - if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) { + if (const auto *SM = dyn_cast<SCEVMulExpr>(Op)) { SmallVector<const SCEV *, 4> Operands; bool hasTrunc = false; for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) { @@ -1199,7 +1199,7 @@ } // If the input value is a chrec scev, truncate the chrec's operands. - if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { + if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) { SmallVector<const SCEV *, 4> Operands; for (const SCEV *Op : AddRec->operands()) Operands.push_back(getTruncateExpr(Op, Ty)); @@ -1318,7 +1318,7 @@ const SCEV *Step = AR->getStepRecurrence(*SE); // Check for a simple looking step prior to loop entry.
- const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start); + const auto *SA = dyn_cast<SCEVAddExpr>(Start); if (!SA) return nullptr; @@ -1340,7 +1340,7 @@ auto PreStartFlags = ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW); const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags); - const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>( + const auto *PreAR = dyn_cast<SCEVAddRecExpr>( SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap)); // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies @@ -1438,7 +1438,7 @@ // non-constant `Start` and do a general SCEV subtraction to compute // `PreStart` below. // - const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start); + const auto *StartC = dyn_cast<SCEVConstant>(Start); if (!StartC) return false; @@ -1480,12 +1480,12 @@ Ty = getEffectiveSCEVType(Ty); // Fold if the operand is constant. - if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) + if (const auto *SC = dyn_cast<SCEVConstant>(Op)) return getConstant( cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty))); // zext(zext(x)) --> zext(x) - if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) + if (const auto *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) return getZeroExtendExpr(SZ->getOperand(), Ty); // Before doing any expensive analysis, check to see if we've already @@ -1498,7 +1498,7 @@ if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; // zext(trunc(x)) --> zext(x) or x or trunc(x) - if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { + if (const auto *ST = dyn_cast<SCEVTruncateExpr>(Op)) { // It's possible the bits taken off by the truncate were all zero bits. If // so, we should be able to simplify this further. const SCEV *X = ST->getOperand(); @@ -1514,7 +1514,7 @@ // did not overflow the old, smaller, value, we can zero extend all of the // operands (often constants). This allows analysis of something like // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; } - if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) + if (const auto *AR = dyn_cast<SCEVAddRecExpr>(Op)) if (AR->isAffine()) { const SCEV *Start = AR->getStart(); const SCEV *Step = AR->getStepRecurrence(*this); @@ -1676,16 +1676,16 @@ Ty = getEffectiveSCEVType(Ty); // Fold if the operand is constant. - if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) + if (const auto *SC = dyn_cast<SCEVConstant>(Op)) return getConstant( cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty))); // sext(sext(x)) --> sext(x) - if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op)) + if (const auto *SS = dyn_cast<SCEVSignExtendExpr>(Op)) return getSignExtendExpr(SS->getOperand(), Ty); // sext(zext(x)) --> zext(x) - if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) + if (const auto *SZ = dyn_cast<SCEVZeroExtendExpr>(Op)) return getZeroExtendExpr(SZ->getOperand(), Ty); // Before doing any expensive analysis, check to see if we've already @@ -1698,7 +1698,7 @@ if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S; // sext(trunc(x)) --> sext(x) or x or trunc(x) - if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) { + if (const auto *ST = dyn_cast<SCEVTruncateExpr>(Op)) { // It's possible the bits taken off by the truncate were all sign bits. If // so, we should be able to simplify this further. const SCEV *X = ST->getOperand(); @@ -1741,7 +1741,7 @@ // did not overflow the old, smaller, value, we can sign extend all of the // operands (often constants). This allows analysis of something like // this: for (signed char X = 0; X < 100; ++X) { int Y = X; } - if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) + if (const auto *AR = dyn_cast<SCEVAddRecExpr>(Op)) if (AR->isAffine()) { const SCEV *Start = AR->getStart(); const SCEV *Step = AR->getStepRecurrence(*this); @@ -1906,12 +1906,12 @@ Ty = getEffectiveSCEVType(Ty); // Sign-extend negative constants.
- if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) + if (const auto *SC = dyn_cast<SCEVConstant>(Op)) if (SC->getAPInt().isNegative()) return getSignExtendExpr(Op, Ty); // Peel off a truncate cast. - if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) { + if (const auto *T = dyn_cast<SCEVTruncateExpr>(Op)) { const SCEV *NewOp = T->getOperand(); if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty)) return getAnyExtendExpr(NewOp, Ty); @@ -1929,7 +1929,7 @@ return SExt; // Force the cast to be folded into the operands of an addrec. - if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) { + if (const auto *AR = dyn_cast<SCEVAddRecExpr>(Op)) { SmallVector<const SCEV *, 4> Ops; for (const SCEV *Op : AR->operands()) Ops.push_back(getAnyExtendExpr(Op, Ty)); @@ -1979,7 +1979,7 @@ // Iterate over the add operands. They are sorted, with constants first. unsigned i = 0; - while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { + while (const auto *C = dyn_cast<SCEVConstant>(Ops[i])) { ++i; // Pull a buried constant out to the outside. if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero()) @@ -1990,13 +1990,13 @@ // Next comes everything else. We're especially interested in multiplies // here, but they're in the middle, so just visit the rest with one loop. for (; i != NumOperands; ++i) { - const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); + const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[i]); if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) { APInt NewScale = Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt(); if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) { // A multiplication of a constant with another add; recurse. - const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); + const auto *Add = cast<SCEVAddExpr>(Mul->getOperand(1)); Interesting |= CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant, Add->op_begin(), Add->getNumOperands(), @@ -2109,10 +2109,10 @@ // If there are any constants, fold them together. unsigned Idx = 0; - if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { + if (const auto *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { ++Idx; assert(Idx < Ops.size()); - while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { + while (const auto *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { // We found two constants, fold them together! Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt()); if (Ops.size() == 2) return Ops[0]; @@ -2158,7 +2158,7 @@ // folded. eg., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n) // if the contents of the resulting outer trunc fold to something simple. for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) { - const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]); + const auto *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]); Type *DstType = Trunc->getType(); Type *SrcType = Trunc->getOperand()->getType(); SmallVector<const SCEV *, 8> LargeOps; @@ -2166,18 +2166,18 @@ // Check all the operands to see if they can be represented in the // source type of the truncate.
for (unsigned i = 0, e = Ops.size(); i != e; ++i) { - if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { + if (const auto *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) { if (T->getOperand()->getType() != SrcType) { Ok = false; break; } LargeOps.push_back(T->getOperand()); - } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) { + } else if (const auto *C = dyn_cast<SCEVConstant>(Ops[i])) { LargeOps.push_back(getAnyExtendExpr(C, SrcType)); - } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) { + } else if (const auto *M = dyn_cast<SCEVMulExpr>(Ops[i])) { SmallVector<const SCEV *, 8> LargeMulOps; for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) { - if (const SCEVTruncateExpr *T = + if (const auto *T = dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) { if (T->getOperand()->getType() != SrcType) { Ok = false; @@ -2214,7 +2214,7 @@ // If there are add operands they would be next. if (Idx < Ops.size()) { bool DeletedAdd = false; - while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { + while (const auto *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) { // If we have an add, expand the add operands onto the end of the operands // list. Ops.erase(Ops.begin()+Idx); @@ -2275,7 +2275,7 @@ // something is not already an operand of the multiply. If so, merge it into // the multiply. for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) { - const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]); + const auto *Mul = cast<SCEVMulExpr>(Ops[Idx]); for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) { const SCEV *MulOpSCEV = Mul->getOperand(MulOp); if (isa<SCEVConstant>(MulOpSCEV)) @@ -2311,7 +2311,7 @@ for (unsigned OtherMulIdx = Idx+1; OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]); ++OtherMulIdx) { - const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); + const auto *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]); // If MulOp occurs in OtherMul, we can fold the two multiplies // together. for (unsigned OMulOp = 0, e = OtherMul->getNumOperands(); @@ -2355,7 +2355,7 @@ // Scan all of the other operands to this add and add them to the vector if // they are loop invariant w.r.t. the recurrence. SmallVector<const SCEV *, 8> LIOps; - const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); + const auto *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); const Loop *AddRecLoop = AddRec->getLoop(); for (unsigned i = 0, e = Ops.size(); i != e; ++i) if (isLoopInvariant(Ops[i], AddRecLoop)) { @@ -2520,11 +2520,11 @@ // If there are any constants, fold them together. unsigned Idx = 0; - if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { + if (const auto *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { // C1*(C2+V) -> C1*C2 + C1*V if (Ops.size() == 2) - if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) + if (const auto *Add = dyn_cast<SCEVAddExpr>(Ops[1])) // If any of Add's ops are Adds or Muls with a constant, // apply this transformation as well. if (Add->getNumOperands() == 2) @@ -2533,7 +2533,7 @@ getMulExpr(LHSC, Add->getOperand(1))); ++Idx; - while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { + while (const auto *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { // We found two constants, fold them together! ConstantInt *Fold = ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt()); @@ -2554,7 +2554,7 @@ // If we have a mul by -1 of an add, try distributing the -1 among the // add operands. if (Ops.size() == 2) { - if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { + if (const auto *Add = dyn_cast<SCEVAddExpr>(Ops[1])) { SmallVector<const SCEV *, 4> NewOps; bool AnyFolded = false; for (const SCEV *AddOp : Add->operands()) { @@ -2587,7 +2587,7 @@ // If there are mul operands inline them all into this expression.
if (Idx < Ops.size()) { bool DeletedMul = false; - while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { + while (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) { if (Ops.size() > MulOpsInlineThreshold) break; // If we have an mul, expand the mul operands onto the end of the operands @@ -2615,7 +2615,7 @@ // Scan all of the other operands to this mul and add them to the vector if // they are loop invariant w.r.t. the recurrence. SmallVector<const SCEV *, 8> LIOps; - const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); + const auto *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]); const Loop *AddRecLoop = AddRec->getLoop(); for (unsigned i = 0, e = Ops.size(); i != e; ++i) if (isLoopInvariant(Ops[i], AddRecLoop)) { @@ -2671,7 +2671,7 @@ for (unsigned OtherIdx = Idx+1; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); ++OtherIdx) { - const SCEVAddRecExpr *OtherAddRec = + const auto *OtherAddRec = dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]); if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop) continue; @@ -2749,7 +2749,7 @@ getEffectiveSCEVType(RHS->getType()) && "SCEVUDivExpr operand types don't match!"); - if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { + if (const auto *RHSC = dyn_cast<SCEVConstant>(RHS)) { if (RHSC->getValue()->equalsInt(1)) return LHS; // X udiv 1 --> x // If the denominator is zero, the result of the udiv is undefined. Don't @@ -2768,8 +2768,8 @@ ++MaxShiftAmt; IntegerType *ExtTy = IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt); - if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) - if (const SCEVConstant *Step = + if (const auto *AR = dyn_cast<SCEVAddRecExpr>(LHS)) + if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) { // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded. const APInt &StepInt = Step->getAPInt(); @@ -2787,7 +2787,7 @@ /// Get a canonical UDivExpr for a recurrence. /// {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0. // We can currently only fold X%N if X is constant. - const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart()); + const auto *StartC = dyn_cast<SCEVConstant>(AR->getStart()); if (StartC && !DivInt.urem(StepInt) && getZeroExtendExpr(AR, ExtTy) == getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy), @@ -2801,7 +2801,7 @@ } } // (A*B)/C --> A*(B/C) if safe and B/C can be folded. - if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) { + if (const auto *M = dyn_cast<SCEVMulExpr>(LHS)) { SmallVector<const SCEV *, 4> Operands; for (const SCEV *Op : M->operands()) Operands.push_back(getZeroExtendExpr(Op, ExtTy)); @@ -2819,7 +2819,7 @@ } } // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded. - if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) { + if (const auto *A = dyn_cast<SCEVAddExpr>(LHS)) { SmallVector<const SCEV *, 4> Operands; for (const SCEV *Op : A->operands()) Operands.push_back(getZeroExtendExpr(Op, ExtTy)); @@ -2838,7 +2838,7 @@ } // Fold if both operands are constant. - if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { + if (const auto *LHSC = dyn_cast<SCEVConstant>(LHS)) { Constant *LHSCV = LHSC->getValue(); Constant *RHSCV = RHSC->getValue(); return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV, @@ -2883,11 +2883,11 @@ // just deal with u/exact (multiply, constant). See SCEVDivision towards the // end of this file for inspiration. - const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS); + const auto *Mul = dyn_cast<SCEVMulExpr>(LHS); if (!Mul) return getUDivExpr(LHS, RHS); - if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) { + if (const auto *RHSCst = dyn_cast<SCEVConstant>(RHS)) { // If the mulexpr multiplies by a constant, then that constant must be the // first element of the mulexpr.
if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) { @@ -2937,7 +2937,7 @@ SCEV::NoWrapFlags Flags) { SmallVector<const SCEV *, 4> Operands; Operands.push_back(Start); - if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) + if (const auto *StepChrec = dyn_cast<SCEVAddRecExpr>(Step)) if (StepChrec->getLoop() == L) { Operands.append(StepChrec->op_begin(), StepChrec->op_end()); return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW)); @@ -2977,7 +2977,7 @@ Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags); // Canonicalize nested AddRecs in by nesting them in order of loop depth. - if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { + if (const auto *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) { const Loop *NestedLoop = NestedAR->getLoop(); if (L->contains(NestedLoop) ? (L->getLoopDepth() < NestedLoop->getLoopDepth()) : @@ -3062,7 +3062,7 @@ Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0); for (const SCEV *IndexExpr : IndexExprs) { // Compute the (potentially symbolic) offset in bytes for this index. - if (StructType *STy = dyn_cast<StructType>(CurTy)) { + if (auto *STy = dyn_cast<StructType>(CurTy)) { // For a struct, add the member offset. ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue(); unsigned FieldNo = Index->getZExtValue(); @@ -3115,10 +3115,10 @@ // If there are any constants, fold them together. unsigned Idx = 0; - if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { + if (const auto *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { ++Idx; assert(Idx < Ops.size()); - while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { + while (const auto *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { // We found two constants, fold them together! ConstantInt *Fold = ConstantInt::get( getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt())); @@ -3149,7 +3149,7 @@ // onto our operand list, and recurse to simplify. if (Idx < Ops.size()) { bool DeletedSMax = false; - while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { + while (const auto *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) { Ops.erase(Ops.begin()+Idx); Ops.append(SMax->op_begin(), SMax->op_end()); DeletedSMax = true; @@ -3216,10 +3216,10 @@ // If there are any constants, fold them together. unsigned Idx = 0; - if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { + if (const auto *LHSC = dyn_cast<SCEVConstant>(Ops[0])) { ++Idx; assert(Idx < Ops.size()); - while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { + while (const auto *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) { // We found two constants, fold them together! ConstantInt *Fold = ConstantInt::get( getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt())); @@ -3250,7 +3250,7 @@ // onto our operand list, and recurse to simplify.
if (Idx < Ops.size()) { bool DeletedUMax = false; - while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { + while (const auto *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) { Ops.erase(Ops.begin()+Idx); Ops.append(UMax->op_begin(), UMax->op_end()); DeletedUMax = true; @@ -3513,7 +3513,7 @@ /// const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags) { - if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) + if (const auto *VC = dyn_cast<SCEVConstant>(V)) return getConstant( cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue()))); @@ -3525,7 +3525,7 @@ /// Return a SCEV corresponding to ~V = -1-V const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) { - if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V)) + if (const auto *VC = dyn_cast<SCEVConstant>(V)) return getConstant( cast<ConstantInt>(ConstantExpr::getNot(VC->getValue()))); @@ -3684,9 +3684,9 @@ if (!V->getType()->isPointerTy()) return V; - if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) { + if (const auto *Cast = dyn_cast<SCEVCastExpr>(V)) { return getPointerBase(Cast->getOperand()); - } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) { + } else if (const auto *NAry = dyn_cast<SCEVNAryExpr>(V)) { const SCEV *PtrOp = nullptr; for (const SCEV *NAryOp : NAry->operands()) { if (NAryOp->getType()->isPointerTy()) { @@ -3912,7 +3912,7 @@ case Instruction::LShr: // Turn logical shift right of a constant into a unsigned divide. - if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) { + if (auto *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) { uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth(); // If the shift count is not less than the bitwidth, the result of @@ -4020,7 +4020,7 @@ // If the value coming around the backedge is an add with the symbolic // value we just inserted, then we found a simple induction variable! - if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) { + if (const auto *Add = dyn_cast<SCEVAddExpr>(BEValue)) { // If there is a single occurrence of the symbolic value, replace it // with a recurrence. unsigned FoundIndex = Add->getNumOperands(); @@ -4053,7 +4053,7 @@ if (BO->IsNSW) Flags = setFlags(Flags, SCEV::FlagNSW); } - } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) { + } else if (auto *GEP = dyn_cast<GEPOperator>(BEValueV)) { // If the increment is an inbounds GEP, then we know the address // space cannot be wrapped around. We cannot make any guarantee // about signed or unsigned overflow because pointers are @@ -4406,26 +4406,26 @@ uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { - if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) + if (const auto *C = dyn_cast<SCEVConstant>(S)) return C->getAPInt().countTrailingZeros(); - if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S)) + if (const auto *T = dyn_cast<SCEVTruncateExpr>(S)) return std::min(GetMinTrailingZeros(T->getOperand()), (uint32_t)getTypeSizeInBits(T->getType())); - if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) { + if (const auto *E = dyn_cast<SCEVZeroExtendExpr>(S)) { uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? getTypeSizeInBits(E->getType()) : OpRes; } - if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) { + if (const auto *E = dyn_cast<SCEVSignExtendExpr>(S)) { uint32_t OpRes = GetMinTrailingZeros(E->getOperand()); return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ? getTypeSizeInBits(E->getType()) : OpRes; } - if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) { + if (const auto *A = dyn_cast<SCEVAddExpr>(S)) { // The result is the min of all operands results.
uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) @@ -4433,7 +4433,7 @@ return MinOpRes; } - if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { + if (const auto *M = dyn_cast<SCEVMulExpr>(S)) { // The result is the sum of all operands results. uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0)); uint32_t BitWidth = getTypeSizeInBits(M->getType()); @@ -4444,7 +4444,7 @@ return SumOpRes; } - if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) { + if (const auto *A = dyn_cast<SCEVAddRecExpr>(S)) { // The result is the min of all operands results. uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0)); for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i) @@ -4452,7 +4452,7 @@ return MinOpRes; } - if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) { + if (const auto *M = dyn_cast<SCEVSMaxExpr>(S)) { // The result is the min of all operands results. uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) @@ -4460,7 +4460,7 @@ return MinOpRes; } - if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) { + if (const auto *M = dyn_cast<SCEVUMaxExpr>(S)) { // The result is the min of all operands results. uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0)); for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i) @@ -4468,7 +4468,7 @@ return MinOpRes; } - if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { + if (const auto *U = dyn_cast<SCEVUnknown>(S)) { // For a SCEVUnknown, ask ValueTracking. unsigned BitWidth = getTypeSizeInBits(U->getType()); APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); @@ -4483,7 +4483,7 @@ /// Helper method to assign a range to V from metadata present in the IR. static Optional<ConstantRange> GetRangeFromMetadata(Value *V) { - if (Instruction *I = dyn_cast<Instruction>(V)) + if (auto *I = dyn_cast<Instruction>(V)) if (MDNode *MD = I->getMetadata(LLVMContext::MD_range)) return getConstantRangeFromMetadata(*MD); @@ -4505,7 +4505,7 @@ if (I != Cache.end()) return I->second; - if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) + if (const auto *C = dyn_cast<SCEVConstant>(S)) return setRange(C, SignHint, ConstantRange(C->getAPInt())); unsigned BitWidth = getTypeSizeInBits(S->getType()); @@ -4525,64 +4525,64 @@ APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1); } - if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { + if (const auto *Add = dyn_cast<SCEVAddExpr>(S)) { ConstantRange X = getRange(Add->getOperand(0), SignHint); for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i) X = X.add(getRange(Add->getOperand(i), SignHint)); return setRange(Add, SignHint, ConservativeResult.intersectWith(X)); } - if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { + if (const auto *Mul = dyn_cast<SCEVMulExpr>(S)) { ConstantRange X = getRange(Mul->getOperand(0), SignHint); for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i) X = X.multiply(getRange(Mul->getOperand(i), SignHint)); return setRange(Mul, SignHint, ConservativeResult.intersectWith(X)); } - if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) { + if (const auto *SMax = dyn_cast<SCEVSMaxExpr>(S)) { ConstantRange X = getRange(SMax->getOperand(0), SignHint); for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i) X = X.smax(getRange(SMax->getOperand(i), SignHint)); return setRange(SMax, SignHint, ConservativeResult.intersectWith(X)); } - if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) { + if (const auto *UMax = dyn_cast<SCEVUMaxExpr>(S)) { ConstantRange X = getRange(UMax->getOperand(0), SignHint); for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i) X = X.umax(getRange(UMax->getOperand(i), SignHint)); return setRange(UMax, SignHint, ConservativeResult.intersectWith(X));
} - if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) { + if (const auto *UDiv = dyn_cast<SCEVUDivExpr>(S)) { ConstantRange X = getRange(UDiv->getLHS(), SignHint); ConstantRange Y = getRange(UDiv->getRHS(), SignHint); return setRange(UDiv, SignHint, ConservativeResult.intersectWith(X.udiv(Y))); } - if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { + if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) { ConstantRange X = getRange(ZExt->getOperand(), SignHint); return setRange(ZExt, SignHint, ConservativeResult.intersectWith(X.zeroExtend(BitWidth))); } - if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { + if (const auto *SExt = dyn_cast<SCEVSignExtendExpr>(S)) { ConstantRange X = getRange(SExt->getOperand(), SignHint); return setRange(SExt, SignHint, ConservativeResult.intersectWith(X.signExtend(BitWidth))); } - if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { + if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(S)) { ConstantRange X = getRange(Trunc->getOperand(), SignHint); return setRange(Trunc, SignHint, ConservativeResult.intersectWith(X.truncate(BitWidth))); } - if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { + if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(S)) { // If there's no unsigned wrap, the value will never be less than its // initial value. if (AddRec->hasNoUnsignedWrap()) - if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart())) + if (const auto *C = dyn_cast<SCEVConstant>(AddRec->getStart())) if (!C->getValue()->isZero()) ConservativeResult = ConservativeResult.intersectWith( ConstantRange(C->getAPInt(), APInt(BitWidth, 0))); @@ -4630,7 +4630,7 @@ return setRange(AddRec, SignHint, ConservativeResult); } - if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { + if (const auto *U = dyn_cast<SCEVUnknown>(S)) { // Check if the IR explicitly contains !range metadata. Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue()); if (MDRange.hasValue()) @@ -4845,7 +4845,7 @@ SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap; - const BinaryOperator *BinOp = cast<BinaryOperator>(V); + const auto *BinOp = cast<BinaryOperator>(V); // Return early if there are no flags to propagate to the SCEV. SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap; @@ -5014,23 +5014,23 @@ if (!isSCEVable(V->getType())) return getUnknown(V); - if (Instruction *I = dyn_cast<Instruction>(V)) { + if (auto *I = dyn_cast<Instruction>(V)) { // Don't attempt to analyze instructions in blocks that aren't // reachable. Such instructions don't matter, and they aren't required // to obey basic rules for definitions dominating uses which this // analysis depends on. if (!DT.isReachableFromEntry(I->getParent())) return getUnknown(V); - } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) + } else if (auto *CI = dyn_cast<ConstantInt>(V)) return getConstant(CI); else if (isa<ConstantPointerNull>(V)) return getZero(V->getType()); - else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) + else if (auto *GA = dyn_cast<GlobalAlias>(V)) return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee()); else if (!isa<Operator>(V)) return getUnknown(V); - Operator *U = cast<Operator>(V); + auto *U = cast<Operator>(V); if (auto BO = MatchBinaryOp(U, DT)) { switch (BO->Opcode) { case Instruction::Add: { @@ -5123,7 +5123,7 @@ case Instruction::And: // For an expression like x&255 that merely masks off the high bits, // use zext(trunc(x)) as the SCEV expression. - if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { + if (auto *CI = dyn_cast<ConstantInt>(BO->RHS)) { if (CI->isNullValue()) return getSCEV(BO->RHS); if (CI->isAllOnesValue()) @@ -5164,7 +5164,7 @@ // // In order for this transformation to be safe, the LHS must be of the // form X*(2^n) and the Or constant must be less than 2^n.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { + if (auto *CI = dyn_cast<ConstantInt>(BO->RHS)) { const SCEV *LHS = getSCEV(BO->LHS); const APInt &CIVal = CI->getValue(); if (GetMinTrailingZeros(LHS) >= @@ -5173,8 +5173,8 @@ const SCEV *S = getAddExpr(LHS, getSCEV(CI)); // If the LHS of the add was an addrec and it has no-wrap flags, // transfer the no-wrap flags, since an or won't introduce a wrap. - if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { - const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS); + if (const auto *NewAR = dyn_cast<SCEVAddRecExpr>(S)) { + const auto *OldAR = cast<SCEVAddRecExpr>(LHS); const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags( OldAR->getNoWrapFlags()); } @@ -5184,7 +5184,7 @@ break; case Instruction::Xor: - if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) { + if (auto *CI = dyn_cast<ConstantInt>(BO->RHS)) { // If the RHS of xor is -1, then this is a not operation. if (CI->isAllOnesValue()) return getNotSCEV(getSCEV(BO->LHS)); @@ -5194,10 +5194,10 @@ // the case where instcombine has trimmed non-demanded bits out // of an xor with -1. if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS)) - if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) + if (auto *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1))) if (LBO->getOpcode() == Instruction::And && LCI->getValue() == CI->getValue()) - if (const SCEVZeroExtendExpr *Z = + if (const auto *Z = dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) { Type *UTy = BO->LHS->getType(); const SCEV *Z0 = Z->getOperand(); @@ -5224,7 +5224,7 @@ case Instruction::Shl: // Turn shift left of a constant amount into a multiply. - if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) { + if (auto *SA = dyn_cast<ConstantInt>(BO->RHS)) { uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth(); // If the shift count is not less than the bitwidth, the result of @@ -5252,8 +5252,8 @@ case Instruction::AShr: // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression. - if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) - if (Operator *L = dyn_cast<Operator>(BO->LHS)) + if (auto *CI = dyn_cast<ConstantInt>(BO->RHS)) + if (auto *L = dyn_cast<Operator>(BO->LHS)) if (L->getOpcode() == Instruction::Shl && L->getOperand(1) == BO->RHS) { uint64_t BitWidth = getTypeSizeInBits(BO->LHS->getType()); @@ -5357,7 +5357,7 @@ assert(ExitingBlock && "Must pass a non-null exiting block!"); assert(L->isLoopExiting(ExitingBlock) && "Exiting block must actually branch out of the loop!"); - const SCEVConstant *ExitCount = + const auto *ExitCount = dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock)); return getConstantTripCount(ExitCount); } @@ -5402,10 +5402,10 @@ const SCEV *TCMul = getAddExpr(ExitCount, getOne(ExitCount->getType())); // FIXME: SCEV distributes multiplication as V1*C1 + V2*C1. We could attempt // to factor simple cases. - if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(TCMul)) + if (const auto *Mul = dyn_cast<SCEVMulExpr>(TCMul)) TCMul = Mul->getOperand(0); - const SCEVConstant *MulC = dyn_cast<SCEVConstant>(TCMul); + const auto *MulC = dyn_cast<SCEVConstant>(TCMul); if (!MulC) return 1; @@ -5455,7 +5455,7 @@ // Push all Loop-header PHIs onto the Worklist stack.
for (BasicBlock::iterator I = Header->begin(); - PHINode *PN = dyn_cast<PHINode>(I); ++I) + auto *PN = dyn_cast<PHINode>(I); ++I) Worklist.push_back(PN); } @@ -5535,7 +5535,7 @@ eraseValueFromMap(It->first); forgetMemoizedResults(Old); } - if (PHINode *PN = dyn_cast<PHINode>(I)) + if (auto *PN = dyn_cast<PHINode>(I)) ConstantEvolutionLoopExitValue.erase(PN); } @@ -5580,7 +5580,7 @@ if (It != ValueExprMap.end()) { eraseValueFromMap(It->first); forgetMemoizedResults(It->second); - if (PHINode *PN = dyn_cast<PHINode>(I)) + if (auto *PN = dyn_cast<PHINode>(I)) ConstantEvolutionLoopExitValue.erase(PN); } @@ -5596,7 +5596,7 @@ } void ScalarEvolution::forgetValue(Value *V) { - Instruction *I = dyn_cast<Instruction>(V); + auto *I = dyn_cast<Instruction>(V); if (!I) return; // Drop information about expressions based on loop-header PHIs. @@ -5614,7 +5614,7 @@ if (It != ValueExprMap.end()) { eraseValueFromMap(It->first); forgetMemoizedResults(It->second); - if (PHINode *PN = dyn_cast<PHINode>(I)) + if (auto *PN = dyn_cast<PHINode>(I)) ConstantEvolutionLoopExitValue.erase(PN); } @@ -5867,7 +5867,7 @@ bool IsOnlyExit = (L->getExitingBlock() != nullptr); TerminatorInst *Term = ExitingBlock->getTerminator(); - if (BranchInst *BI = dyn_cast<BranchInst>(Term)) { + if (auto *BI = dyn_cast<BranchInst>(Term)) { assert(BI->isConditional() && "If unconditional, it can't be in loop!"); // Proceed to the next level to examine the exit condition expression. return computeExitLimitFromCond( @@ -5875,7 +5875,7 @@ /*ControlsExit=*/IsOnlyExit, AllowPredicates); } - if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) + if (auto *SI = dyn_cast<SwitchInst>(Term)) return computeExitLimitFromSingleExitSwitch(L, SI, Exit, /*ControlsExit=*/IsOnlyExit); @@ -5890,7 +5890,7 @@ bool ControlsExit, bool AllowPredicates) { // Check if the controlling expression for this loop is an And or Or. - if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) { + if (auto *BO = dyn_cast<BinaryOperator>(ExitCond)) { if (BO->getOpcode() == Instruction::And) { // Recurse on the operands of the and. bool EitherMayExit = L->contains(TBB); @@ -5984,7 +5984,7 @@ // With an icmp, it may be feasible to compute an exact backedge-taken count. // Proceed to the next level to examine the icmp. - if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { + if (auto *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) { ExitLimit EL = computeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB, ControlsExit); if (EL.hasFullInfo() || !AllowPredicates) @@ -5999,7 +5999,7 @@ // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to // preserve the CFG and is temporarily leaving constant conditions // in place. - if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) { + if (auto *CI = dyn_cast<ConstantInt>(ExitCond)) { if (L->contains(FBB) == !CI->getZExtValue()) // The backedge is always taken. return getCouldNotCompute(); @@ -6028,8 +6028,8 @@ Cond = ExitCond->getInversePredicate(); // Handle common loops like: for (X = "string"; *X; ++X) - if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) - if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { + if (auto *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0))) + if (auto *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) { ExitLimit ItCnt = computeLoadConstantCompareExitLimit(LI, RHS, L, Cond); if (ItCnt.hasAnyInfo()) @@ -6056,8 +6056,8 @@ // If we have a comparison of a chrec against a constant, try to use value // ranges to answer this query. - if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) - if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) + if (const auto *RHSC = dyn_cast<SCEVConstant>(RHS)) + if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(LHS)) if (AddRec->getLoop() == L) { // Form the constant range.
ConstantRange CompRange = @@ -6159,12 +6159,12 @@ // Check to see if the loaded pointer is a getelementptr of a global. // TODO: Use SCEV instead of manually grubbing with GEPs. - GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); + auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)); if (!GEP) return getCouldNotCompute(); // Make sure that it is really a constant global we are gepping, with an // initializer, and make sure the first IDX is really 0. - GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); + auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)); if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || GEP->getNumOperands() < 3 || !isa<ConstantInt>(GEP->getOperand(1)) || !cast<ConstantInt>(GEP->getOperand(1))->isNullValue()) @@ -6175,7 +6175,7 @@ std::vector<Constant*> Indexes; unsigned VarIdxNum = 0; for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) - if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { + if (auto *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { Indexes.push_back(CI); } else if (!isa<ConstantInt>(GEP->getOperand(i))) { if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's. @@ -6195,7 +6195,7 @@ // We can only recognize very limited forms of loop index expressions, in // particular, only affine AddRec's like {C1,+,C2}. - const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); + const auto *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx); if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) || !isa<SCEVConstant>(IdxExpr->getOperand(0)) || !isa<SCEVConstant>(IdxExpr->getOperand(1))) @@ -6227,7 +6227,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit( Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) { - ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV); + auto *RHS = dyn_cast<ConstantInt>(RHSV); if (!RHS) return getCouldNotCompute(); @@ -6377,7 +6377,7 @@ isa<LoadInst>(I)) return true; - if (const CallInst *CI = dyn_cast<CallInst>(I)) + if (const auto *CI = dyn_cast<CallInst>(I)) if (const Function *F = CI->getCalledFunction()) return canConstantFoldCallTo(F); return false; } @@ -6412,10 +6412,10 @@ for (Value *Op : UseInst->operands()) { if (isa<Constant>(Op)) continue; - Instruction *OpInst = dyn_cast<Instruction>(Op); + auto *OpInst = dyn_cast<Instruction>(Op); if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr; - PHINode *P = dyn_cast<PHINode>(OpInst); + auto *P = dyn_cast<PHINode>(OpInst); if (!P) // If this operand is already visited, reuse the prior result. // We may have P != PHI if this is the deepest point at which the @@ -6443,10 +6443,10 @@ /// derived from a constant PHI. If this expression does not fit with these /// constraints, return null. static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { - Instruction *I = dyn_cast<Instruction>(V); + auto *I = dyn_cast<Instruction>(V); if (!I || !canConstantEvolve(I, L)) return nullptr; - if (PHINode *PN = dyn_cast<PHINode>(I)) + if (auto *PN = dyn_cast<PHINode>(I)) return PN; // Record non-constant instructions contained by the loop. @@ -6463,8 +6463,8 @@ const DataLayout &DL, const TargetLibraryInfo *TLI) { // Convenient constant check, but redundant for recursive calls.
- if (Constant *C = dyn_cast<Constant>(V)) return C; - Instruction *I = dyn_cast<Instruction>(V); + if (auto *C = dyn_cast<Constant>(V)) return C; + auto *I = dyn_cast<Instruction>(V); if (!I) return nullptr; if (Constant *C = Vals.lookup(I)) return C; @@ -6481,7 +6481,7 @@ std::vector<Constant*> Operands(I->getNumOperands()); for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { - Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i)); + auto *Operand = dyn_cast<Instruction>(I->getOperand(i)); if (!Operand) { Operands[i] = dyn_cast<Constant>(I->getOperand(i)); if (!Operands[i]) return nullptr; @@ -6493,10 +6493,10 @@ Operands[i] = C; } - if (CmpInst *CI = dyn_cast<CmpInst>(I)) + if (auto *CI = dyn_cast<CmpInst>(I)) return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], Operands[1], DL, TLI); - if (LoadInst *LI = dyn_cast<LoadInst>(I)) { + if (auto *LI = dyn_cast<LoadInst>(I)) { if (!LI->isVolatile()) return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); } @@ -6553,7 +6553,7 @@ return nullptr; for (auto &I : *Header) { - PHINode *PHI = dyn_cast<PHINode>(&I); + auto *PHI = dyn_cast<PHINode>(&I); if (!PHI) break; auto *StartCST = getOtherIncomingValue(PHI, Latch); if (!StartCST) continue; @@ -6591,7 +6591,7 @@ // because that doesn't necessarily prevent us from computing PN. SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute; for (const auto &I : CurrentIterVals) { - PHINode *PHI = dyn_cast<PHINode>(I.first); + auto *PHI = dyn_cast<PHINode>(I.first); if (!PHI || PHI == PN || PHI->getParent() != Header) continue; PHIsToCompute.emplace_back(PHI, I.second); } @@ -6635,7 +6635,7 @@ assert(Latch && "Should follow from NumIncomingValues == 2!"); for (auto &I : *Header) { - PHINode *PHI = dyn_cast<PHINode>(&I); + auto *PHI = dyn_cast<PHINode>(&I); if (!PHI) break; auto *StartCST = getOtherIncomingValue(PHI, Latch); @@ -6670,7 +6670,7 @@ // into CurrentIterVals. SmallVector<PHINode *, 8> PHIsToCompute; for (const auto &I : CurrentIterVals) { - PHINode *PHI = dyn_cast<PHINode>(I.first); + auto *PHI = dyn_cast<PHINode>(I.first); if (!PHI || PHI->getParent() != Header) continue; PHIsToCompute.push_back(PHI); } @@ -6722,27 +6722,27 @@ case scUnknown: return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue()); case scSignExtend: { - const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V); + const auto *SS = cast<SCEVSignExtendExpr>(V); if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand())) return ConstantExpr::getSExt(CastOp, SS->getType()); break; } case scZeroExtend: { - const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V); + const auto *SZ = cast<SCEVZeroExtendExpr>(V); if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand())) return ConstantExpr::getZExt(CastOp, SZ->getType()); break; } case scTruncate: { - const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V); + const auto *ST = cast<SCEVTruncateExpr>(V); if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand())) return ConstantExpr::getTrunc(CastOp, ST->getType()); break; } case scAddExpr: { - const SCEVAddExpr *SA = cast<SCEVAddExpr>(V); + const auto *SA = cast<SCEVAddExpr>(V); if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) { - if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { + if (auto *PTy = dyn_cast<PointerType>(C->getType())) { unsigned AS = PTy->getAddressSpace(); Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS); C = ConstantExpr::getBitCast(C, DestPtrTy); @@ -6766,7 +6766,7 @@ if (C2->getType()->isPointerTy()) return nullptr; - if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) { + if (auto *PTy = dyn_cast<PointerType>(C->getType())) { if (PTy->getElementType()->isStructTy()) C2 = ConstantExpr::getIntegerCast( C2, Type::getInt32Ty(C->getContext()), true); @@ -6779,7 +6779,7 @@ break; } case scMulExpr: { - const SCEVMulExpr *SM = cast<SCEVMulExpr>(V); + const auto *SM = cast<SCEVMulExpr>(V); if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) { // Don't bother with pointers at all.
if (C->getType()->isPointerTy()) return nullptr; @@ -6793,7 +6793,7 @@ break; } case scUDivExpr: { - const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V); + const auto *SU = cast<SCEVUDivExpr>(V); if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS())) if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS())) if (LHS->getType() == RHS->getType()) @@ -6812,18 +6812,18 @@ // If this instruction is evolved from a constant-evolving PHI, compute the // exit value from the loop without using SCEVs. - if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) { - if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) { + if (const auto *SU = dyn_cast<SCEVUnknown>(V)) { + if (auto *I = dyn_cast<Instruction>(SU->getValue())) { const Loop *LI = this->LI[I->getParent()]; if (LI && LI->getParentLoop() == L) // Looking for loop exit value. - if (PHINode *PN = dyn_cast<PHINode>(I)) + if (auto *PN = dyn_cast<PHINode>(I)) if (PN->getParent() == LI->getHeader()) { // Okay, there is no closed form solution for the PHI node. Check // to see if the loop that contains it has a known backedge-taken // count. If so, we may be able to force computation of the exit // value. const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI); - if (const SCEVConstant *BTCC = + if (const auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) { // Okay, we know how many times the containing loop executes. If // this is a constant evolving PHI node, get the final value at @@ -6842,7 +6842,7 @@ SmallVector<Constant *, 4> Operands; bool MadeImprovement = false; for (Value *Op : I->operands()) { - if (Constant *C = dyn_cast<Constant>(Op)) { + if (auto *C = dyn_cast<Constant>(Op)) { Operands.push_back(C); continue; } @@ -6871,10 +6871,10 @@ if (MadeImprovement) { Constant *C = nullptr; const DataLayout &DL = getDataLayout(); - if (const CmpInst *CI = dyn_cast<CmpInst>(I)) + if (const auto *CI = dyn_cast<CmpInst>(I)) C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], Operands[1], DL, &TLI); - else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) { + else if (const auto *LI = dyn_cast<LoadInst>(I)) { if (!LI->isVolatile()) C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL); } else @@ -6889,7 +6889,7 @@ return V; } - if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { + if (const auto *Comm = dyn_cast<SCEVCommutativeExpr>(V)) { // Avoid performing the look-up in the common case where the specified // expression has no loop-variant portions. for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) { @@ -6920,7 +6920,7 @@ return Comm; } - if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) { + if (const auto *Div = dyn_cast<SCEVUDivExpr>(V)) { const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L); const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L); if (LHS == Div->getLHS() && RHS == Div->getRHS()) @@ -6930,7 +6930,7 @@ // If this is a loop recurrence for a loop that does not contain L, then we // are dealing with the final value computed by the loop. - if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { + if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) { // First, attempt to evaluate each operand. // Avoid performing the look-up in the common case where the specified // expression has no loop-variant portions.
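Note that throughout these ScalarEvolution hunks the replacement is "const auto *" rather than bare "auto". A short sketch of why the pointer and its constness are still written out, illustrative only and not part of the patch; the free function is hypothetical and assumes the LLVM ScalarEvolution headers are available:

    #include "llvm/Analysis/ScalarEvolutionExpressions.h"
    #include "llvm/Support/Casting.h"

    using namespace llvm;

    static bool isNegativeConstantSCEV(const SCEV *S) {
      // Bare 'auto' would deduce the same 'const SCEVConstant *' type here,
      // but the LLVM coding standards ask for 'auto *'/'const auto *' so that
      // pointer-ness and constness stay visible at the use site.
      if (const auto *SC = dyn_cast<SCEVConstant>(S))
        return SC->getAPInt().isNegative();
      return false;
    }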
@@ -6974,21 +6974,21 @@ return AddRec; } - if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { + if (const auto *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) { const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); if (Op == Cast->getOperand()) return Cast; // must be loop invariant return getZeroExtendExpr(Op, Cast->getType()); } - if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { + if (const auto *Cast = dyn_cast<SCEVSignExtendExpr>(V)) { const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); if (Op == Cast->getOperand()) return Cast; // must be loop invariant return getSignExtendExpr(Op, Cast->getType()); } - if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) { + if (const auto *Cast = dyn_cast<SCEVTruncateExpr>(V)) { const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L); if (Op == Cast->getOperand()) return Cast; // must be loop invariant @@ -7056,9 +7056,9 @@ static Optional<std::pair<const SCEVConstant *, const SCEVConstant *>> SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) { assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!"); - const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); - const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); - const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); + const auto *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0)); + const auto *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1)); + const auto *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2)); // We currently can only solve this if the coefficients are constants. if (!LC || !MC || !NC) @@ -7126,13 +7126,13 @@ SmallPtrSet<const SCEVPredicate *, 4> Predicates; // If the value is a constant - if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { + if (const auto *C = dyn_cast<SCEVConstant>(V)) { // If the value is already zero, the branch will execute zero times. if (C->getValue()->isZero()) return C; return getCouldNotCompute(); // Otherwise it will loop infinitely. } - const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V); + const auto *AddRec = dyn_cast<SCEVAddRecExpr>(V); if (!AddRec && AllowPredicates) // Try to make this an AddRec using runtime tests, in the first X // iterations of this loop, where X is the SCEV expression found by the @@ -7149,7 +7149,7 @@ const SCEVConstant *R1 = Roots->first; const SCEVConstant *R2 = Roots->second; // Pick the smallest positive root value. - if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp( + if (auto *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp( CmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) { if (!CB->getZExtValue()) std::swap(R1, R2); // R1 is the minimum root now. @@ -7191,7 +7191,7 @@ // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step. // We have not yet seen any such cases. - const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step); + const auto *StepC = dyn_cast<SCEVConstant>(Step); if (!StepC || StepC->getValue()->equalsInt(0)) return getCouldNotCompute(); @@ -7289,7 +7289,7 @@ } // Then, try to solve the above equation provided that Start is constant. - if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start)) { + if (const auto *StartC = dyn_cast<SCEVConstant>(Start)) { const SCEV *E = SolveLinEquationWithOverflow( StepC->getValue()->getValue(), -StartC->getValue()->getValue(), *this); return ExitLimit(E, E, false, Predicates); @@ -7305,7 +7305,7 @@ // If the value is a constant, check to see if it is known to be non-zero // already. If so, the backedge will execute zero times. - if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) { + if (const auto *C = dyn_cast<SCEVConstant>(V)) { if (!C->getValue()->isNullValue()) return getZero(C->getType()); return getCouldNotCompute(); // Otherwise it will loop infinitely.
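The patch rewrites declarations fed by three different casting helpers, and "auto *" interacts with each the same way because the result type is always fixed by the template argument. A hedged sketch of the distinction, illustrative only and not part of the patch; the helper function is invented, assuming LLVM IR headers:

    #include "llvm/IR/Instructions.h"
    #include "llvm/Support/Casting.h"

    using namespace llvm;

    static BasicBlock *definingBlock(Value *Addr) {
      // cast<T>             : asserts the input really is a T; never null.
      // dyn_cast<T>         : returns null if the input is not a T, but the
      //                       input itself must be non-null.
      // dyn_cast_or_null<T> : like dyn_cast, but also tolerates a null input,
      //                       the same reason the PHITransAddr.cpp hunk above
      //                       uses it on a pointer that may have been nulled.
      if (auto *Inst = dyn_cast_or_null<Instruction>(Addr))
        return Inst->getParent();
      return nullptr;
    }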
@@ -7351,10 +7351,10 @@ // Otherwise, if they're both SCEVUnknown, it's possible that they hold // two different instructions with the same value. Check for this case. - if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A)) - if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B)) - if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue())) - if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue())) + if (const auto *AU = dyn_cast<SCEVUnknown>(A)) + if (const auto *BU = dyn_cast<SCEVUnknown>(B)) + if (const auto *AI = dyn_cast<Instruction>(AU->getValue())) + if (const auto *BI = dyn_cast<Instruction>(BU->getValue())) if (ComputesEqualValues(AI, BI)) return true; @@ -7372,9 +7372,9 @@ return false; // Canonicalize a constant to the right side. - if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) { + if (const auto *LHSC = dyn_cast<SCEVConstant>(LHS)) { // Check for both operands constant. - if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) { + if (const auto *RHSC = dyn_cast<SCEVConstant>(RHS)) { if (ConstantExpr::getICmp(Pred, LHSC->getValue(), RHSC->getValue())->isNullValue()) @@ -7391,7 +7391,7 @@ // If we're comparing an addrec with a value which is loop-invariant in the // addrec's loop, put the addrec on the left. Also make a dominance check, // as both operands could be addrecs loop-invariant in each other's loop. - if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { + if (const auto *AR = dyn_cast<SCEVAddRecExpr>(RHS)) { const Loop *L = AR->getLoop(); if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) { std::swap(LHS, RHS); @@ -7402,7 +7402,7 @@ // If there's a constant operand, canonicalize comparisons with boundary // cases, and canonicalize *-or-equal comparisons to regular comparisons. - if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) { + if (const auto *RC = dyn_cast<SCEVConstant>(RHS)) { const APInt &RA = RC->getAPInt(); bool SimplifiedByConstantRange = false; @@ -7433,8 +7433,8 @@ case ICmpInst::ICMP_NE: // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b. if (!RA) - if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS)) - if (const SCEVMulExpr *ME = + if (const auto *AE = dyn_cast<SCEVAddExpr>(LHS)) + if (const auto *ME = dyn_cast<SCEVMulExpr>(AE->getOperand(0))) if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 && ME->getOperand(0)->isAllOnesValue()) { @@ -7594,8 +7594,8 @@ // every iteration of the loop. // If LHS and RHS are both addrec, both conditions must be true in // every iteration of the loop.
- const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); - const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); + const auto *LAR = dyn_cast<SCEVAddRecExpr>(LHS); + const auto *RAR = dyn_cast<SCEVAddRecExpr>(RHS); bool LeftGuarded = false; bool RightGuarded = false; if (LAR) { @@ -7714,7 +7714,7 @@ Pred = ICmpInst::getSwappedPredicate(Pred); } - const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); + const auto *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS); if (!ArLHS || ArLHS->getLoop() != L) return false; @@ -7894,7 +7894,7 @@ if (!Latch) return false; - BranchInst *LoopContinuePredicate = + auto *LoopContinuePredicate = dyn_cast<BranchInst>(Latch->getTerminator()); if (LoopContinuePredicate && LoopContinuePredicate->isConditional() && isImpliedCond(Pred, LHS, RHS, @@ -7959,7 +7959,7 @@ if (!PBB) continue; - BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); + auto *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator()); if (!ContinuePredicate || !ContinuePredicate->isConditional()) continue; @@ -8007,7 +8007,7 @@ if (isImpliedViaGuard(Pair.first, Pred, LHS, RHS)) return true; - BranchInst *LoopEntryPredicate = + auto *LoopEntryPredicate = dyn_cast<BranchInst>(Pair.first->getTerminator()); if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional()) @@ -8045,7 +8045,7 @@ make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); }); // Recursively handle And and Or conditions. - if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) { + if (auto *BO = dyn_cast<BinaryOperator>(FoundCondValue)) { if (BO->getOpcode() == Instruction::And) { if (!Inverse) return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) || @@ -8057,7 +8057,7 @@ } } - ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue); + auto *ICI = dyn_cast<ICmpInst>(FoundCondValue); if (!ICI) return false; // Now that we found a conditional branch that dominates the loop or controls @@ -8372,12 +8372,12 @@ /// If Expr computes ~A, return A else return nullptr static const SCEV *MatchNotExpr(const SCEV *Expr) { - const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr); + const auto *Add = dyn_cast<SCEVAddExpr>(Expr); if (!Add || Add->getNumOperands() != 2 || !Add->getOperand(0)->isAllOnesValue()) return nullptr; - const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); + const auto *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1)); if (!AddRHS || AddRHS->getNumOperands() != 2 || !AddRHS->getOperand(0)->isAllOnesValue()) return nullptr; @@ -8390,7 +8390,7 @@ template <typename MaxExprType> static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr, const SCEV *Candidate) { - const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr); + const auto *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr); if (!MaxExpr) return false; return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end(); @@ -8420,10 +8420,10 @@ if (!ICmpInst::isRelational(Pred)) return false; - const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS); + const auto *LAR = dyn_cast<SCEVAddRecExpr>(LHS); if (!LAR) return false; - const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS); + const auto *RAR = dyn_cast<SCEVAddRecExpr>(RHS); if (!RAR) return false; if (LAR->getLoop() != RAR->getLoop()) @@ -8630,7 +8630,7 @@ if (!isLoopInvariant(RHS, L)) return getCouldNotCompute(); - const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); + const auto *IV = dyn_cast<SCEVAddRecExpr>(LHS); bool PredicatedIV = false; if (!IV && AllowPredicates) { @@ -8792,7 +8792,7 @@ if (!isLoopInvariant(RHS, L)) return getCouldNotCompute(); - const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS); + const auto *IV = dyn_cast<SCEVAddRecExpr>(LHS); if (!IV && AllowPredicates) // Try to make this an AddRec using runtime tests, in the first X // iterations of this loop, where X is the SCEV expression found by the @@ -8866,7 +8866,7 @@ return SE.getCouldNotCompute(); // If the start is a non-zero constant, shift the
-  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
+  if (const auto *SC = dyn_cast<SCEVConstant>(getStart()))
     if (!SC->getValue()->isZero()) {
       SmallVector Operands(op_begin(), op_end());
       Operands[0] = SE.getZero(SC->getType());
@@ -8937,7 +8937,7 @@
     const SCEVConstant *R1 = Roots->first;
     const SCEVConstant *R2 = Roots->second;
     // Pick the smallest positive root value.
-    if (ConstantInt *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp(
+    if (auto *CB = dyn_cast<ConstantInt>(ConstantExpr::getICmp(
            ICmpInst::ICMP_ULT, R1->getValue(), R2->getValue()))) {
       if (!CB->getZExtValue())
         std::swap(R1, R2); // R1 is the minimum root now.
@@ -8994,7 +8994,7 @@
       : SE(SE), Strides(S) {}
   bool follow(const SCEV *S) {
-    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
+    if (const auto *AR = dyn_cast<SCEVAddRecExpr>(S))
       Strides.push_back(AR->getStepRecurrence(SE));
     return true;
   }
@@ -9136,7 +9136,7 @@
   // End of recursion.
   if (Last == 0) {
-    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
+    if (const auto *M = dyn_cast<SCEVMulExpr>(Step)) {
       SmallVector Qs;
       for (const SCEV *Op : M->operands())
         if (!isa<SCEVConstant>(Op))
@@ -9185,7 +9185,7 @@
 // Return the number of product terms in S.
 static inline int numberOfTerms(const SCEV *S) {
-  if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
+  if (const auto *Expr = dyn_cast<SCEVMulExpr>(S))
     return Expr->getNumOperands();
   return 1;
 }
@@ -9197,7 +9197,7 @@
   if (isa<SCEVUnknown>(T))
     return T;
-  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
+  if (const auto *M = dyn_cast<SCEVMulExpr>(T)) {
     SmallVector Factors;
     for (const SCEV *Op : M->operands())
       if (!isa<SCEVConstant>(Op))
@@ -9212,9 +9212,9 @@
 /// Return the size of an element read or written by Inst.
 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
   Type *Ty;
-  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
+  if (auto *Store = dyn_cast<StoreInst>(Inst))
     Ty = Store->getValueOperand()->getType();
-  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
+  else if (auto *Load = dyn_cast<LoadInst>(Inst))
     Ty = Load->getType();
   else
     return nullptr;
@@ -9439,7 +9439,7 @@
 void ScalarEvolution::SCEVCallbackVH::deleted() {
   assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
-  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
+  if (auto *PN = dyn_cast<PHINode>(getValPtr()))
     SE->ConstantEvolutionLoopExitValue.erase(PN);
   SE->eraseValueFromMap(getValPtr()); // this now dangles!
@@ -9462,13 +9462,13 @@
       continue;
     if (!Visited.insert(U).second)
       continue;
-    if (PHINode *PN = dyn_cast<PHINode>(U))
+    if (auto *PN = dyn_cast<PHINode>(U))
       SE->ConstantEvolutionLoopExitValue.erase(PN);
     SE->eraseValueFromMap(U);
     Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
   }
   // Delete the Old value.
-  if (PHINode *PN = dyn_cast<PHINode>(Old))
+  if (auto *PN = dyn_cast<PHINode>(Old))
     SE->ConstantEvolutionLoopExitValue.erase(PN);
   SE->eraseValueFromMap(Old); // this now dangles!
@@ -9739,7 +9739,7 @@
   case scSignExtend:
     return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
   case scAddRecExpr: {
-    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
+    const auto *AR = cast<SCEVAddRecExpr>(S);
     // If L is the addrec's loop, it's computable.
     if (AR->getLoop() == L)
@@ -9781,7 +9781,7 @@
     return HasVarying ? LoopComputable : LoopInvariant;
   }
   case scUDivExpr: {
-    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
+    const auto *UDiv = cast<SCEVUDivExpr>(S);
     LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
     if (LD == LoopVariant)
       return LoopVariant;
@@ -9846,7 +9846,7 @@
   // to test for proper dominance too, because the instruction which
   // produces the addrec's value is a PHI, and a PHI effectively properly
   // dominates its entire containing block.
-  const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
+  const auto *AR = cast<SCEVAddRecExpr>(S);
   if (!DT.dominates(AR->getLoop()->getHeader(), BB))
     return DoesNotDominateBlock;
@@ -9857,7 +9857,7 @@
   case scMulExpr:
   case scUMaxExpr:
   case scSMaxExpr: {
-    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
+    const auto *NAry = cast<SCEVNAryExpr>(S);
     bool Proper = true;
     for (const SCEV *NAryOp : NAry->operands()) {
       BlockDisposition D = getBlockDisposition(NAryOp, BB);
@@ -9869,7 +9869,7 @@
     return Proper ? ProperlyDominatesBlock : DominatesBlock;
   }
   case scUDivExpr: {
-    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
+    const auto *UDiv = cast<SCEVUDivExpr>(S);
     const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
     BlockDisposition LD = getBlockDisposition(LHS, BB);
     if (LD == DoesNotDominateBlock)
@@ -9881,7 +9881,7 @@
       ProperlyDominatesBlock : DominatesBlock;
   }
   case scUnknown:
-    if (Instruction *I =
+    if (auto *I =
           dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
       if (I->getParent() == BB)
         return DominatesBlock;
@@ -10145,7 +10145,7 @@
   const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
     const SCEV *Operand = visit(Expr->getOperand());
-    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
+    const auto *AR = dyn_cast<SCEVAddRecExpr>(Operand);
     if (AR && AR->getLoop() == L && AR->isAffine()) {
       // This couldn't be folded because the operand didn't have the nuw
       // flag. Add the nusw flag as an assumption that we could make.
@@ -10161,7 +10161,7 @@
   const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
     const SCEV *Operand = visit(Expr->getOperand());
-    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
+    const auto *AR = dyn_cast<SCEVAddRecExpr>(Operand);
     if (AR && AR->getLoop() == L && AR->isAffine()) {
       // This couldn't be folded because the operand didn't have the nsw
       // flag. Add the nssw flag as an assumption that we could make.
Index: lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
===================================================================
--- lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
+++ lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
@@ -93,16 +93,16 @@
 ///
 /// Returns null if none was found.
 Value *SCEVAAResult::GetBaseValue(const SCEV *S) {
-  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+  if (const auto *AR = dyn_cast<SCEVAddRecExpr>(S)) {
     // In an addrec, assume that the base will be in the start, rather
     // than the step.
     return GetBaseValue(AR->getStart());
-  } else if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
+  } else if (const auto *A = dyn_cast<SCEVAddExpr>(S)) {
     // If there's a pointer operand, it'll be sorted at the end of the list.
     const SCEV *Last = A->getOperand(A->getNumOperands() - 1);
     if (Last->getType()->isPointerTy())
       return GetBaseValue(Last);
-  } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
+  } else if (const auto *U = dyn_cast<SCEVUnknown>(S)) {
     // This is a leaf node.
     return U->getValue();
   }
Index: lib/Analysis/ScalarEvolutionExpander.cpp
===================================================================
--- lib/Analysis/ScalarEvolutionExpander.cpp
+++ lib/Analysis/ScalarEvolutionExpander.cpp
@@ -54,7 +54,7 @@
   // Check to see if there is already a cast!
   for (User *U : V->users())
     if (U->getType() == Ty)
-      if (CastInst *CI = dyn_cast<CastInst>(U))
+      if (auto *CI = dyn_cast<CastInst>(U))
         if (CI->getOpcode() == Op) {
           // If the cast isn't where we want it, create a new cast at IP.
           // Likewise, do not reuse a cast at BIP because it must dominate
@@ -122,7 +122,7 @@
   if (Op == Instruction::BitCast) {
     if (V->getType() == Ty)
       return V;
-    if (CastInst *CI = dyn_cast<CastInst>(V)) {
+    if (auto *CI = dyn_cast<CastInst>(V)) {
       if (CI->getOperand(0)->getType() == Ty)
         return CI->getOperand(0);
     }
@@ -130,13 +130,13 @@
   // Short-circuit unnecessary inttoptr<->ptrtoint casts.
   if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
       SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
-    if (CastInst *CI = dyn_cast<CastInst>(V))
+    if (auto *CI = dyn_cast<CastInst>(V))
       if ((CI->getOpcode() == Instruction::PtrToInt ||
            CI->getOpcode() == Instruction::IntToPtr) &&
           SE.getTypeSizeInBits(CI->getType()) ==
           SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
         return CI->getOperand(0);
-    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+    if (auto *CE = dyn_cast<ConstantExpr>(V))
       if ((CE->getOpcode() == Instruction::PtrToInt ||
            CE->getOpcode() == Instruction::IntToPtr) &&
           SE.getTypeSizeInBits(CE->getType()) ==
@@ -145,12 +145,12 @@
   }
   // Fold a cast of a constant.
-  if (Constant *C = dyn_cast<Constant>(V))
+  if (auto *C = dyn_cast<Constant>(V))
     return ConstantExpr::getCast(Op, C, Ty);
   // Cast the argument at the beginning of the entry block, after
   // any bitcasts of other arguments.
-  if (Argument *A = dyn_cast<Argument>(V)) {
+  if (auto *A = dyn_cast<Argument>(V)) {
     BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
     while ((isa<BitCastInst>(IP) &&
             isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
@@ -161,7 +161,7 @@
   }
   // Cast the instruction immediately after the instruction.
-  Instruction *I = cast<Instruction>(V);
+  auto *I = cast<Instruction>(V);
   BasicBlock::iterator IP = findInsertPointAfter(I, Builder.GetInsertBlock());
   return ReuseOrCreateCast(I, Ty, Op, IP);
 }
@@ -171,8 +171,8 @@
 Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                  Value *LHS, Value *RHS) {
   // Fold a binop with constant operands.
-  if (Constant *CLHS = dyn_cast<Constant>(LHS))
-    if (Constant *CRHS = dyn_cast<Constant>(RHS))
+  if (auto *CLHS = dyn_cast<Constant>(LHS))
+    if (auto *CRHS = dyn_cast<Constant>(RHS))
       return ConstantExpr::get(Opcode, CLHS, CRHS);
   // Do a quick scan to see if we have this binop nearby. If so, reuse it.
@@ -209,7 +209,7 @@
   }
   // If we haven't found this binop, insert it.
-  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
+  auto *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
   BO->setDebugLoc(Loc);
   rememberInstruction(BO);
@@ -237,12 +237,12 @@
   }
   // For a Constant, check for a multiple of the given factor.
-  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
+  if (const auto *C = dyn_cast<SCEVConstant>(S)) {
     // 0/x == 0.
     if (C->isZero())
       return true;
     // Check for divisibility.
-    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
+    if (const auto *FC = dyn_cast<SCEVConstant>(Factor)) {
       ConstantInt *CI =
           ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
       // If the quotient is zero and the remainder is non-zero, reject
@@ -260,11 +260,11 @@
   // In a Mul, check if there is a constant operand which is a multiple
   // of the given factor.
-  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
+  if (const auto *M = dyn_cast<SCEVMulExpr>(S)) {
     // Size is known, check if there is a constant operand which is a multiple
     // of the given factor. If so, we can factor it.
-    const SCEVConstant *FC = cast<SCEVConstant>(Factor);
-    if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
+    const auto *FC = cast<SCEVConstant>(Factor);
+    if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
       if (!C->getAPInt().srem(FC->getAPInt())) {
         SmallVector NewMulOps(M->op_begin(), M->op_end());
         NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
@@ -274,7 +274,7 @@
   }
   // In an AddRec, check if both start and step are divisible.
-  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
+  if (const auto *A = dyn_cast<SCEVAddRecExpr>(S)) {
     const SCEV *Step = A->getStepRecurrence(SE);
     const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
     if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
@@ -312,7 +312,7 @@
   // If it returned an add, use the operands. Otherwise it simplified
   // the sum into a single value, so just use that.
   Ops.clear();
-  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
+  if (const auto *Add = dyn_cast<SCEVAddExpr>(Sum))
     Ops.append(Add->op_begin(), Add->op_end());
   else if (!Sum->isZero())
     Ops.push_back(Sum);
@@ -331,7 +331,7 @@
   // Find the addrecs.
   SmallVector AddRecs;
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
-    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
+    while (const auto *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
       const SCEV *Start = A->getStart();
       if (Start->isZero()) break;
       const SCEV *Zero = SE.getConstant(Ty, 0);
       AddRecs.push_back(SE.getAddRecExpr(Zero,
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
-      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
+      if (const auto *Add = dyn_cast<SCEVAddExpr>(Start)) {
         Ops[i] = Zero;
         Ops.append(Add->op_begin(), Add->op_end());
         e += Add->getNumOperands();
@@ -444,7 +444,7 @@
     GepIndices.push_back(Scaled);
     // Collect struct field index operands.
-    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
+    while (auto *STy = dyn_cast<StructType>(ElTy)) {
       bool FoundFieldNo = false;
       // An empty struct has no fields.
       if (STy->getNumElements() == 0) break;
      // Field offsets are known. See if a constant offset falls within any of
      // the struct fields.
       if (Ops.empty())
         break;
-      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
+      if (const auto *C = dyn_cast<SCEVConstant>(Ops[0]))
         if (SE.getTypeSizeInBits(C->getType()) <= 64) {
           const StructLayout &SL = *DL.getStructLayout(STy);
           uint64_t FullOffset = C->getValue()->getZExtValue();
@@ -477,7 +477,7 @@
       }
     }
-    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
+    if (auto *ATy = dyn_cast<ArrayType>(ElTy))
       ElTy = ATy->getElementType();
     else
       break;
@@ -498,8 +498,8 @@
   Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);
   // Fold a GEP with constant operands.
-  if (Constant *CLHS = dyn_cast<Constant>(V))
-    if (Constant *CRHS = dyn_cast<Constant>(Idx))
+  if (auto *CLHS = dyn_cast<Constant>(V))
+    if (auto *CRHS = dyn_cast<Constant>(Idx))
       return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
                                             CLHS, CRHS);
@@ -601,25 +601,25 @@
   if (isa<SCEVConstant>(S))
     // A constant has no relevant loops.
     return nullptr;
-  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
-    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
+  if (const auto *U = dyn_cast<SCEVUnknown>(S)) {
+    if (const auto *I = dyn_cast<Instruction>(U->getValue()))
       return Pair.first->second = SE.LI.getLoopFor(I->getParent());
     // A non-instruction has no relevant loops.
     return nullptr;
   }
-  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
+  if (const auto *N = dyn_cast<SCEVNAryExpr>(S)) {
     const Loop *L = nullptr;
-    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
+    if (const auto *AR = dyn_cast<SCEVAddRecExpr>(S))
       L = AR->getLoop();
     for (const SCEV *Op : N->operands())
       L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
     return RelevantLoops[N] = L;
   }
-  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
+  if (const auto *C = dyn_cast<SCEVCastExpr>(S)) {
     const Loop *Result = getRelevantLoop(C->getOperand());
     return RelevantLoops[C] = Result;
   }
-  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
+  if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
     const Loop *Result = PickMostRelevantLoop(
         getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
     return RelevantLoops[D] = Result;
@@ -688,7 +688,7 @@
       // This is the first operand. Just expand it.
       Sum = expand(Op);
       ++I;
-    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
+    } else if (auto *PTy = dyn_cast<PointerType>(Sum->getType())) {
       // The running sum expression is a pointer. Try to form a getelementptr
       // at this level with that as the base.
       SmallVector NewOps;
       for (; I != E && I->first == CurLoop; ++I) {
         // If the operand is SCEVUnknown and not instructions, peek through
         // it, to enable more of it to be folded into the GEP.
         const SCEV *X = I->second;
-        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
+        if (const auto *U = dyn_cast<SCEVUnknown>(X))
           if (!isa<Instruction>(U->getValue()))
             X = SE.getSCEV(U->getValue());
         NewOps.push_back(X);
       }
       Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
-    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
+    } else if (auto *PTy = dyn_cast<PointerType>(Op->getType())) {
       // The running sum is an integer, and there's a pointer at this level.
       // Try to form a getelementptr. If the running sum is instructions,
       // use a SCEVUnknown to avoid re-analyzing them.
@@ -782,7 +782,7 @@
   Type *Ty = SE.getEffectiveSCEVType(S->getType());
   Value *LHS = expandCodeFor(S->getLHS(), Ty);
-  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
+  if (const auto *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
     const APInt &RHS = SC->getAPInt();
     if (RHS.isPowerOf2())
       return InsertBinop(Instruction::LShr, LHS,
@@ -798,7 +798,7 @@
 /// GEP expansion.
 static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                               ScalarEvolution &SE) {
-  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
+  while (const auto *A = dyn_cast<SCEVAddRecExpr>(Base)) {
     Base = A->getStart();
     Rest = SE.getAddExpr(Rest,
                          SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                           A->getStepRecurrence(SE),
                                           A->getLoop(),
                                           A->getNoWrapFlags(SCEV::FlagNW)));
   }
-  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
+  if (const auto *A = dyn_cast<SCEVAddExpr>(Base)) {
     Base = A->getOperand(A->getNumOperands()-1);
     SmallVector NewAddOps(A->op_begin(), A->op_end());
     NewAddOps.back() = Rest;
@@ -828,7 +828,7 @@
   if (L == IVIncInsertLoop) {
     for (User::op_iterator OI = IncV->op_begin()+1,
            OE = IncV->op_end(); OI != OE; ++OI)
-      if (Instruction *OInst = dyn_cast<Instruction>(OI))
+      if (auto *OInst = dyn_cast<Instruction>(OI))
         if (!SE.DT.dominates(OInst, IVIncInsertPos))
           return false;
   }
@@ -867,7 +867,7 @@
   // Check for a simple Add/Sub or GEP of a loop invariant step.
   case Instruction::Add:
   case Instruction::Sub: {
-    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
+    auto *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
     if (!OInst || SE.DT.dominates(OInst, InsertPos))
       return dyn_cast<Instruction>(IncV->getOperand(0));
     return nullptr;
@@ -878,7 +878,7 @@
     for (auto I = IncV->op_begin() + 1, E = IncV->op_end(); I != E; ++I) {
       if (isa<Constant>(*I))
         continue;
-      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
+      if (auto *OInst = dyn_cast<Instruction>(*I)) {
         if (!SE.DT.dominates(OInst, InsertPos))
           return nullptr;
       }
@@ -979,7 +979,7 @@
   Value *IncV;
   // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
   if (ExpandTy->isPointerTy()) {
-    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
+    auto *GEPPtrTy = cast<PointerType>(ExpandTy);
     // If the step isn't constant, don't use an implicitly scaled GEP, because
     // that would require a multiply inside the loop.
     if (!isa<ConstantInt>(StepV))
@@ -1108,7 +1108,7 @@
     if (!PN || !SE.isSCEVable(PN->getType()))
       continue;
-    const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN));
+    const auto *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN));
     if (!PhiSCEV)
       continue;
@@ -1119,7 +1119,7 @@
     if (!IsMatchingSCEV && !TryNonMatchingSCEV)
       continue;
-    Instruction *TempIncV =
+    auto *TempIncV =
         cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));
     // Check whether we can reuse this PHI node.
@@ -1380,7 +1380,7 @@
   // Re-apply any non-loop-dominating offset.
   if (PostLoopOffset) {
-    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
+    if (auto *PTy = dyn_cast<PointerType>(ExpandTy)) {
       const SCEV *const OffsetArray[1] = { PostLoopOffset };
       Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
     } else {
@@ -1437,7 +1437,7 @@
   // Dig into the expression to find the pointer base for a GEP.
   ExposePointerBase(Base, RestArray[0], SE);
   // If we found a pointer, expand the AddRec with a GEP.
-  if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
+  if (auto *PTy = dyn_cast<PointerType>(Base->getType())) {
     // Make sure the Base isn't something exotic, such as a multiplied
     // or divided pointer value. In those cases, the result type isn't
     // actually a pointer type.
@@ -1704,7 +1704,7 @@
   if (!V)
     V = visit(S);
   else if (VO.second) {
-    if (PointerType *Vty = dyn_cast<PointerType>(V->getType())) {
+    if (auto *Vty = dyn_cast<PointerType>(V->getType())) {
       Type *Ety = Vty->getPointerElementType();
       int64_t Offset = VO.second->getSExtValue();
       int64_t ESize = SE.getTypeSizeInBits(Ety);
@@ -1758,7 +1758,7 @@
   // Emit code for it.
   SCEVInsertPointGuard Guard(Builder, this);
-  PHINode *V =
+  auto *V =
       cast<PHINode>(expandCodeFor(H, nullptr, &L->getHeader()->front()));
   return V;
@@ -1843,9 +1843,9 @@
       continue;
     if (BasicBlock *LatchBlock = L->getLoopLatch()) {
-      Instruction *OrigInc = dyn_cast<Instruction>(
+      auto *OrigInc = dyn_cast<Instruction>(
           OrigPhiRef->getIncomingValueForBlock(LatchBlock));
-      Instruction *IsomorphicInc =
+      auto *IsomorphicInc =
          dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
       if (OrigInc && IsomorphicInc) {
@@ -1879,7 +1879,7 @@
         Value *NewInc = OrigInc;
         if (OrigInc->getType() != IsomorphicInc->getType()) {
           Instruction *IP = nullptr;
-          if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
+          if (auto *PN = dyn_cast<PHINode>(OrigInc))
             IP = &*PN->getParent()->getFirstInsertionPt();
           else
             IP = OrigInc->getNextNode();
@@ -2024,7 +2024,7 @@
   // Recurse past nary expressions, which commonly occur in the
   // BackedgeTakenCount. They may already exist in program code, and if not,
   // they are not too expensive rematerialize.
-  if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(S)) {
+  if (const auto *NAry = dyn_cast<SCEVNAryExpr>(S)) {
     for (auto *Op : NAry->operands())
       if (isHighCostExpansionHelper(Op, L, At, Processed))
         return true;
@@ -2214,14 +2214,14 @@
   SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}
   bool follow(const SCEV *S) {
-    if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
-      const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
+    if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
+      const auto *SC = dyn_cast<SCEVConstant>(D->getRHS());
       if (!SC || SC->getValue()->isZero()) {
         IsUnsafe = true;
         return false;
       }
     }
-    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+    if (const auto *AR = dyn_cast<SCEVAddRecExpr>(S)) {
       const SCEV *Step = AR->getStepRecurrence(SE);
       if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
         IsUnsafe = true;
Index: lib/Analysis/ScalarEvolutionNormalization.cpp
===================================================================
--- lib/Analysis/ScalarEvolutionNormalization.cpp
+++ lib/Analysis/ScalarEvolutionNormalization.cpp
@@ -44,7 +44,7 @@
   // can live in blocks that are not dominated by the latch block, but (since
   // their uses occur in the predecessor block, not the block the PHI lives in)
   // should still use the post-inc value. Check for this case now.
-  PHINode *PN = dyn_cast<PHINode>(User);
+  auto *PN = dyn_cast<PHINode>(User);
   if (!PN || !Operand) return false; // not a phi, not dominated by latch block.
   // Look at all of the uses of Operand by the PHI node. If any use corresponds
@@ -91,7 +91,7 @@
 const SCEV *PostIncTransform::
 TransformImpl(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
-  if (const SCEVCastExpr *X = dyn_cast<SCEVCastExpr>(S)) {
+  if (const auto *X = dyn_cast<SCEVCastExpr>(S)) {
     const SCEV *O = X->getOperand();
     const SCEV *N = TransformSubExpr(O, User, OperandValToReplace);
     if (O != N)
@@ -104,7 +104,7 @@
     return S;
   }
-  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
+  if (const auto *AR = dyn_cast<SCEVAddRecExpr>(S)) {
     // An addrec. This is the interesting part.
     SmallVector Operands;
     const Loop *L = AR->getLoop();
@@ -187,7 +187,7 @@
     return Result;
   }
-  if (const SCEVNAryExpr *X = dyn_cast<SCEVNAryExpr>(S)) {
+  if (const auto *X = dyn_cast<SCEVNAryExpr>(S)) {
     SmallVector Operands;
     bool Changed = false;
     // Transform each operand.
@@ -210,7 +210,7 @@
     return S;
   }
-  if (const SCEVUDivExpr *X = dyn_cast<SCEVUDivExpr>(S)) {
+  if (const auto *X = dyn_cast<SCEVUDivExpr>(S)) {
     const SCEV *LO = X->getLHS();
     const SCEV *RO = X->getRHS();
     const SCEV *LN = TransformSubExpr(LO, User, OperandValToReplace);
Index: lib/Analysis/ScopedNoAliasAA.cpp
===================================================================
--- lib/Analysis/ScopedNoAliasAA.cpp
+++ lib/Analysis/ScopedNoAliasAA.cpp
@@ -130,7 +130,7 @@
 static void collectMDInDomain(const MDNode *List, const MDNode *Domain,
                               SmallPtrSetImpl<const MDNode *> &Nodes) {
   for (const MDOperand &MDOp : List->operands())
-    if (const MDNode *MD = dyn_cast<MDNode>(MDOp))
+    if (const auto *MD = dyn_cast<MDNode>(MDOp))
       if (AliasScopeNode(MD).getDomain() == Domain)
         Nodes.insert(MD);
 }
@@ -143,7 +143,7 @@
   // Collect the set of scope domains relevant to the noalias scopes.
   SmallPtrSet Domains;
   for (const MDOperand &MDOp : NoAlias->operands())
-    if (const MDNode *NAMD = dyn_cast<MDNode>(MDOp))
+    if (const auto *NAMD = dyn_cast<MDNode>(MDOp))
       if (const MDNode *Domain = AliasScopeNode(NAMD).getDomain())
         Domains.insert(Domain);
Index: lib/Analysis/SparsePropagation.cpp
===================================================================
--- lib/Analysis/SparsePropagation.cpp
+++ lib/Analysis/SparsePropagation.cpp
@@ -57,9 +57,9 @@
   LatticeVal LV;
   if (LatticeFunc->IsUntrackedValue(V))
     return LatticeFunc->getUntrackedVal();
-  else if (Constant *C = dyn_cast<Constant>(V))
+  else if (auto *C = dyn_cast<Constant>(V))
     LV = LatticeFunc->ComputeConstant(C);
-  else if (Argument *A = dyn_cast<Argument>(V))
+  else if (auto *A = dyn_cast<Argument>(V))
     LV = LatticeFunc->ComputeArgument(A);
   else if (!isa<Instruction>(V))
     // All other non-instructions are overdefined.
@@ -124,7 +124,7 @@
   Succs.resize(TI.getNumSuccessors());
   if (TI.getNumSuccessors() == 0) return;
-  if (BranchInst *BI = dyn_cast<BranchInst>(&TI)) {
+  if (auto *BI = dyn_cast<BranchInst>(&TI)) {
     if (BI->isUnconditional()) {
       Succs[0] = true;
       return;
@@ -171,7 +171,7 @@
     return;
   }
-  SwitchInst &SI = cast<SwitchInst>(TI);
+  auto &SI = cast<SwitchInst>(TI);
   LatticeVal SCValue;
   if (AggressiveUndef)
     SCValue = getOrInitValueState(SI.getCondition());
@@ -277,7 +277,7 @@
 void SparseSolver::visitInst(Instruction &I) {
   // PHIs are handled by the propagation logic, they are never passed into the
   // transfer functions.
-  if (PHINode *PN = dyn_cast<PHINode>(&I))
+  if (auto *PN = dyn_cast<PHINode>(&I))
     return visitPHINode(*PN);
   // Otherwise, ask the transfer function what the result is. If this is
@@ -286,7 +286,7 @@
   if (IV != LatticeFunc->getUntrackedVal())
     UpdateState(I, IV);
-  if (TerminatorInst *TI = dyn_cast<TerminatorInst>(&I))
+  if (auto *TI = dyn_cast<TerminatorInst>(&I))
     visitTerminatorInst(*TI);
 }
@@ -305,7 +305,7 @@
   // "I" got into the work list because it made a transition. See if any
   // users are both live and in need of updating.
   for (User *U : I->users()) {
-    Instruction *UI = cast<Instruction>(U);
+    auto *UI = cast<Instruction>(U);
     if (BBExecutable.count(UI->getParent()))   // Inst is executable?
       visitInst(*UI);
   }
 }
Index: lib/Analysis/TypeBasedAliasAnalysis.cpp
===================================================================
--- lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -153,7 +153,7 @@
   TBAANodeImpl getParent() const {
     if (Node->getNumOperands() < 2)
       return TBAANodeImpl();
-    MDNodeTy *P = dyn_cast_or_null<MDNodeTy>(Node->getOperand(1));
+    auto *P = dyn_cast_or_null<MDNodeTy>(Node->getOperand(1));
     if (!P)
       return TBAANodeImpl();
     // Ok, this node has a valid parent. Return it.
@@ -252,7 +252,7 @@
                    : mdconst::extract<ConstantInt>(Node->getOperand(2))
                          ->getZExtValue();
     Offset -= Cur;
-    MDNode *P = dyn_cast_or_null<MDNode>(Node->getOperand(1));
+    auto *P = dyn_cast_or_null<MDNode>(Node->getOperand(1));
     if (!P)
       return TBAAStructTypeNode();
     return TBAAStructTypeNode(P);
@@ -277,7 +277,7 @@
     uint64_t Cur = mdconst::extract<ConstantInt>(Node->getOperand(TheIdx + 1))
                        ->getZExtValue();
     Offset -= Cur;
-    MDNode *P = dyn_cast_or_null<MDNode>(Node->getOperand(TheIdx));
+    auto *P = dyn_cast_or_null<MDNode>(Node->getOperand(TheIdx));
     if (!P)
       return TBAAStructTypeNode();
     return TBAAStructTypeNode(P);
@@ -389,7 +389,7 @@
   if (!isStructPathTBAA(this)) {
     if (getNumOperands() < 1)
       return false;
-    if (MDString *Tag1 = dyn_cast<MDString>(getOperand(0))) {
+    if (auto *Tag1 = dyn_cast<MDString>(getOperand(0))) {
       if (Tag1->getString() == "vtable pointer")
         return true;
     }
@@ -399,10 +399,10 @@
   // For struct-path aware TBAA, we use the access type of the tag.
   if (getNumOperands() < 2)
     return false;
-  MDNode *Tag = cast_or_null<MDNode>(getOperand(1));
+  auto *Tag = cast_or_null<MDNode>(getOperand(1));
   if (!Tag)
     return false;
-  if (MDString *Tag1 = dyn_cast<MDString>(Tag->getOperand(0))) {
+  if (auto *Tag1 = dyn_cast<MDString>(Tag->getOperand(0))) {
     if (Tag1->getString() == "vtable pointer")
       return true;
   }
Index: lib/Analysis/ValueTracking.cpp
===================================================================
--- lib/Analysis/ValueTracking.cpp
+++ lib/Analysis/ValueTracking.cpp
@@ -144,7 +144,7 @@
          "LHS and RHS should have the same type");
   assert(LHS->getType()->isIntOrIntVectorTy() &&
          "LHS and RHS should be integers");
-  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
+  auto *IT = cast<IntegerType>(LHS->getType()->getScalarType());
   APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0);
   APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0);
   computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT);
@@ -250,7 +250,7 @@
                                APInt &KnownZero2, APInt &KnownOne2,
                                unsigned Depth, const Query &Q) {
   if (!Add) {
-    if (const ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
+    if (const auto *CLHS = dyn_cast<ConstantInt>(Op0)) {
       // We know that the top bits of C-X are clear if X contains less bits
       // than C (i.e. no wrap-around can happen). For example, 20-X is
       // positive if we can prove that X is >= 0 and < 16.
@@ -433,7 +433,7 @@
       return true;
     EphValues.insert(V);
-    if (const User *U = dyn_cast<User>(V))
+    if (const auto *U = dyn_cast<User>(V))
       for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
            J != JE; ++J) {
         if (isSafeToSpeculativelyExecute(*J))
@@ -447,7 +447,7 @@
 // Is this an intrinsic that cannot be speculated but also cannot trap?
 static bool isAssumeLikeIntrinsic(const Instruction *I) {
-  if (const CallInst *CI = dyn_cast<CallInst>(I))
+  if (const auto *CI = dyn_cast<CallInst>(I))
     if (Function *F = CI->getCalledFunction())
       switch (F->getIntrinsicID()) {
       default: break;
@@ -529,7 +529,7 @@
   for (auto &AssumeVH : Q.AC->assumptions()) {
     if (!AssumeVH)
       continue;
-    CallInst *I = cast<CallInst>(AssumeVH);
+    auto *I = cast<CallInst>(AssumeVH);
     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
            "Got assumption for the wrong function!");
     if (Q.isExcluded(I))
@@ -1149,7 +1149,7 @@
     break;
   }
   case Instruction::SRem:
-    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
+    if (auto *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
       APInt RA = Rem->getValue().abs();
       if (RA.isPowerOf2()) {
         APInt LowBits = RA - 1;
@@ -1187,7 +1187,7 @@
     break;
   case Instruction::URem: {
-    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
+    if (auto *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
       const APInt &RA = Rem->getValue();
       if (RA.isPowerOf2()) {
         APInt LowBits = (RA - 1);
@@ -1211,7 +1211,7 @@
   }
   case Instruction::Alloca: {
-    const AllocaInst *AI = cast<AllocaInst>(I);
+    const auto *AI = cast<AllocaInst>(I);
     unsigned Align = AI->getAlignment();
     if (Align == 0)
       Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());
@@ -1235,7 +1235,7 @@
         // Handle struct member offset arithmetic.
         // Handle case when index is vector zeroinitializer
-        Constant *CIndex = cast<Constant>(Index);
+        auto *CIndex = cast<Constant>(Index);
         if (CIndex->isZeroValue())
           continue;
@@ -1268,7 +1268,7 @@
     break;
   }
   case Instruction::PHI: {
-    const PHINode *P = cast<PHINode>(I);
+    const auto *P = cast<PHINode>(I);
     // Handle the case of a simple two-predecessor recurrence PHI.
     // There's a lot more that could theoretically be done here, but
     // this is sufficient to catch some interesting cases.
@@ -1276,7 +1276,7 @@
     for (unsigned i = 0; i != 2; ++i) {
       Value *L = P->getIncomingValue(i);
       Value *R = P->getIncomingValue(!i);
-      Operator *LU = dyn_cast<Operator>(L);
+      auto *LU = dyn_cast<Operator>(L);
       if (!LU)
         continue;
       unsigned Opcode = LU->getOpcode();
@@ -1394,7 +1394,7 @@
       KnownZero |= KnownZero2;
       KnownOne |= KnownOne2;
     }
-    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+    if (const auto *II = dyn_cast<IntrinsicInst>(I)) {
       switch (II->getIntrinsicID()) {
       default: break;
       case Intrinsic::bswap:
@@ -1439,8 +1439,8 @@
     computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
     break;
   case Instruction::ExtractValue:
-    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
-      const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
+    if (auto *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
+      const auto *EVI = cast<ExtractValueInst>(I);
       if (EVI->getNumIndices() != 1) break;
       if (EVI->getIndices()[0] == 0) {
         switch (II->getIntrinsicID()) {
@@ -1515,7 +1515,7 @@
   }
   // Handle a constant vector by taking the intersection of the known bits of
   // each element.
-  if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
+  if (const auto *CDS = dyn_cast<ConstantDataSequential>(V)) {
     // We know that CDS must be a vector of integers. Take the intersection of
    // each element.
     KnownZero.setAllBits(); KnownOne.setAllBits();
@@ -1566,13 +1566,13 @@
   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
   // the bits of its aliasee.
-  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+  if (const auto *GA = dyn_cast<GlobalAlias>(V)) {
     if (!GA->isInterposable())
       computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, Depth + 1, Q);
     return;
   }
-  if (const Operator *I = dyn_cast<Operator>(V))
+  if (const auto *I = dyn_cast<Operator>(V))
     computeKnownBitsFromOperator(I, KnownZero, KnownOne, Depth, Q);
   // Aligned pointers have trailing zeros - refine KnownZero set
@@ -1614,7 +1614,7 @@
 /// types and vectors of integers.
 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                             const Query &Q) {
-  if (const Constant *C = dyn_cast<Constant>(V)) {
+  if (const auto *C = dyn_cast<Constant>(V)) {
     if (C->isNullValue())
       return OrZero;
@@ -1644,10 +1644,10 @@
       match(V, m_LShr(m_Value(X), m_Value()))))
     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
-  if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
+  if (const auto *ZI = dyn_cast<ZExtInst>(V))
     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
-  if (const SelectInst *SI = dyn_cast<SelectInst>(V))
+  if (const auto *SI = dyn_cast<SelectInst>(V))
     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
@@ -1665,7 +1665,7 @@
   // Adding a power-of-two or zero to the same power-of-two or zero yields
   // either the original power-of-two, a larger power-of-two or zero.
   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
-    const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
+    const auto *VOBO = cast<OverflowingBinaryOperator>(V);
     if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
       if (match(X, m_And(m_Specific(Y), m_Value())) ||
           match(X, m_And(m_Value(), m_Specific(Y))))
@@ -1731,7 +1731,7 @@
        GTI != GTE; ++GTI) {
     // Struct types are easy -- they must always be indexed by a constant.
     if (StructType *STy = GTI.getStructTypeOrNull()) {
-      ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
+      auto *OpC = cast<ConstantInt>(GTI.getOperand());
       unsigned ElementIdx = OpC->getZExtValue();
       const StructLayout *SL = Q.DL.getStructLayout(STy);
       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
@@ -1746,7 +1746,7 @@
     // Fast path the constant operand case both for efficiency and so we don't
     // increment Depth when just zipping down an all-constant GEP.
-    if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
+    if (auto *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
       if (!OpC->isZero())
         return true;
       continue;
@@ -1833,7 +1833,7 @@
   if (V->getType()->isPointerTy()) {
     if (isKnownNonNull(V))
       return true;
-    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
+    if (const auto *GEP = dyn_cast<GEPOperator>(V))
       if (isGEPKnownNonNull(GEP, Depth, Q))
         return true;
   }
@@ -1853,7 +1853,7 @@
   // if the lowest bit is shifted off the end.
   if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
     // shl nuw can't remove any non-zero bits.
-    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
+    const auto *BO = cast<OverflowingBinaryOperator>(V);
     if (BO->hasNoUnsignedWrap())
       return isKnownNonZero(X, Depth, Q);
@@ -1867,7 +1867,7 @@
   // defined if the sign bit is shifted off the end.
   else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
     // shr exact can only shift out zero bits.
-    const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
+    const auto *BO = cast<PossiblyExactOperator>(V);
     if (BO->isExact())
       return isKnownNonZero(X, Depth, Q);
@@ -1879,7 +1879,7 @@
     // If the shifter operand is a constant, and all of the bits shifted
     // out are known to be zero, and X is known non-zero then at least one
     // non-zero bit must remain.
-    if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
+    if (auto *Shift = dyn_cast<ConstantInt>(Y)) {
       APInt KnownZero(BitWidth, 0);
       APInt KnownOne(BitWidth, 0);
       computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
@@ -1938,7 +1938,7 @@
   }
   // X * Y.
   else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
-    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
+    const auto *BO = cast<OverflowingBinaryOperator>(V);
     // If X and Y are non-zero then so is X * Y as long as the multiplication
     // does not overflow.
     if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
@@ -1946,13 +1946,13 @@
       return true;
   }
   // (C ? X : Y) != 0 if X != 0 and Y != 0.
-  else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
+  else if (const auto *SI = dyn_cast<SelectInst>(V)) {
     if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
         isKnownNonZero(SI->getFalseValue(), Depth, Q))
       return true;
   }
   // PHI
-  else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
+  else if (const auto *PN = dyn_cast<PHINode>(V)) {
     // Try and detect a recurrence that monotonically increases from a
     // starting value, as these are common as induction variables.
     if (PN->getNumIncomingValues() == 2) {
       Value *Start = PN->getIncomingValue(0);
       Value *Induction = PN->getIncomingValue(1);
       if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
         std::swap(Start, Induction);
-      if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
+      if (auto *C = dyn_cast<ConstantInt>(Start)) {
         if (!C->isZero() && !C->isNegative()) {
           ConstantInt *X;
           if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
@@ -1987,7 +1987,7 @@
 /// Return true if V2 == V1 + X, where X is known non-zero.
 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
-  const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
+  const auto *BO = dyn_cast<BinaryOperator>(V1);
   if (!BO || BO->getOpcode() != Instruction::Add)
     return false;
   Value *Op = nullptr;
@@ -2010,7 +2010,7 @@
   if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
     return true;
-  if (IntegerType *Ty = dyn_cast<IntegerType>(V1->getType())) {
+  if (auto *Ty = dyn_cast<IntegerType>(V1->getType())) {
     // Are any known bits in V1 contradictory to known bits in V2? If V1
     // has a known zero where V2 has a known one, they must not be equal.
     auto BitWidth = Ty->getBitWidth();
@@ -2089,7 +2089,7 @@
   if (Depth == 6)
     return 1;  // Limit search depth.
-  const Operator *U = dyn_cast<Operator>(V);
+  const auto *U = dyn_cast<Operator>(V);
   switch (Operator::getOpcode(V)) {
   default: break;
   case Instruction::SExt:
@@ -2247,7 +2247,7 @@
     return std::min(Tmp, Tmp2)-1;
   case Instruction::PHI: {
-    const PHINode *PN = cast<PHINode>(U);
+    const auto *PN = cast<PHINode>(U);
     unsigned NumIncomingValues = PN->getNumIncomingValues();
     // Don't analyze large in-degree PHIs.
     if (NumIncomingValues > 4) break;
@@ -2315,7 +2315,7 @@
   Type *T = V->getType();
-  ConstantInt *CI = dyn_cast<ConstantInt>(V);
+  auto *CI = dyn_cast<ConstantInt>(V);
   if (Base == 0)
     return false;
@@ -2325,7 +2325,7 @@
     return true;
   }
-  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
+  auto *CO = dyn_cast<ConstantExpr>(V);
   Constant *BaseVal = ConstantInt::get(T, Base);
   if (CO && CO == BaseVal) {
     // Multiple is 1.
@@ -2340,7 +2340,7 @@
   if (Depth == MaxDepth) return false;  // Limit search depth.
-  Operator *I = dyn_cast<Operator>(V);
+  auto *I = dyn_cast<Operator>(V);
   if (!I) return false;
   switch (I->getOpcode()) {
@@ -2357,7 +2357,7 @@
     Value *Op1 = I->getOperand(1);
     if (I->getOpcode() == Instruction::Shl) {
-      ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
+      auto *Op1CI = dyn_cast<ConstantInt>(Op1);
       if (!Op1CI) return false;
       // Turn Op0 << Op1 into Op0 * 2^Op1
       APInt Op1Int = Op1CI->getValue();
@@ -2369,8 +2369,8 @@
     Value *Mul0 = nullptr;
     if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
-      if (Constant *Op1C = dyn_cast<Constant>(Op1))
-        if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
+      if (auto *Op1C = dyn_cast<Constant>(Op1))
+        if (auto *MulC = dyn_cast<Constant>(Mul0)) {
          if (Op1C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
@@ -2383,7 +2383,7 @@
         return true;
       }
-      if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
+      if (auto *Mul0CI = dyn_cast<ConstantInt>(Mul0))
         if (Mul0CI->getValue() == 1) {
           // V == Base * Op1, so return Op1
           Multiple = Op1;
@@ -2393,8 +2393,8 @@
     Value *Mul1 = nullptr;
     if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
-      if (Constant *Op0C = dyn_cast<Constant>(Op0))
-        if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
+      if (auto *Op0C = dyn_cast<Constant>(Op0))
+        if (auto *MulC = dyn_cast<Constant>(Mul1)) {
          if (Op0C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
@@ -2407,7 +2407,7 @@
         return true;
       }
-      if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
+      if (auto *Mul1CI = dyn_cast<ConstantInt>(Mul1))
         if (Mul1CI->getValue() == 1) {
           // V == Base * Op0, so return Op0
           Multiple = Op0;
@@ -2539,7 +2539,7 @@
 ///
 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
                                 unsigned Depth) {
-  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
+  if (const auto *CFP = dyn_cast<ConstantFP>(V))
     return !CFP->getValueAPF().isNegZero();
   // FIXME: Magic number! At the least, this should be given a name because it's
@@ -2548,17 +2548,17 @@
   if (Depth == 6)
     return false;  // Limit search depth.
-  const Operator *I = dyn_cast<Operator>(V);
+  const auto *I = dyn_cast<Operator>(V);
   if (!I) return false;
   // Check if the nsz fast-math flag is set
-  if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
+  if (const auto *FPO = dyn_cast<FPMathOperator>(I))
     if (FPO->hasNoSignedZeros())
       return true;
   // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
   if (I->getOpcode() == Instruction::FAdd)
-    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
+    if (auto *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
       if (CFP->isNullValue())
         return true;
@@ -2566,7 +2566,7 @@
   if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
     return true;
-  if (const CallInst *CI = dyn_cast<CallInst>(I)) {
+  if (const auto *CI = dyn_cast<CallInst>(I)) {
     Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
     switch (IID) {
     default:
@@ -2586,7 +2586,7 @@
 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
                                        const TargetLibraryInfo *TLI,
                                        unsigned Depth) {
-  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
+  if (const auto *CFP = dyn_cast<ConstantFP>(V))
     return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero();
   // FIXME: Magic number! At the least, this should be given a name because it's
@@ -2595,7 +2595,7 @@
   if (Depth == 6)
     return false;  // Limit search depth.
-  const Operator *I = dyn_cast<Operator>(V);
+  const auto *I = dyn_cast<Operator>(V);
   if (!I) return false;
   switch (I->getOpcode()) {
@@ -2637,7 +2637,7 @@
     case Intrinsic::sqrt:
       return true;
     case Intrinsic::powi:
-      if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
+      if (auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
        // powi(x,n) is non-negative if n is even.
       if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0)
         return true;
@@ -2664,13 +2664,13 @@
   if (V->getType()->isIntegerTy(8)) return V;
   // Handle 'null' ConstantArrayZero etc.
-  if (Constant *C = dyn_cast<Constant>(V))
+  if (auto *C = dyn_cast<Constant>(V))
     if (C->isNullValue())
       return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
   // Constant float and double values can be handled as integer values if the
   // corresponding integer value is "byteable". An important case is 0.0.
-  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
+  if (auto *CFP = dyn_cast<ConstantFP>(V)) {
     if (CFP->getType()->isFloatTy())
       V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
     if (CFP->getType()->isDoubleTy())
@@ -2679,7 +2679,7 @@
   }
   // We can handle constant integers that are multiple of 8 bits.
-  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+  if (auto *CI = dyn_cast<ConstantInt>(V)) {
     if (CI->getBitWidth() % 8 == 0) {
       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
@@ -2691,7 +2691,7 @@
   // A ConstantDataArray/Vector is splatable if all its members are equal and
   // also splatable.
-  if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
+  if (auto *CA = dyn_cast<ConstantDataSequential>(V)) {
     Value *Elt = CA->getElementAsConstant(0);
     Value *Val = isBytewiseValue(Elt);
     if (!Val)
@@ -2724,7 +2724,7 @@
                                 SmallVectorImpl<unsigned> &Idxs,
                                 unsigned IdxSkip,
                                 Instruction *InsertBefore) {
-  llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
+  auto *STy = dyn_cast<llvm::StructType>(IndexedType);
   if (STy) {
     // Save the original To argument so we can modify it
     Value *OrigTo = To;
@@ -2739,7 +2739,7 @@
       if (!To) {
         // Couldn't find any inserted value for this index? Cleanup
         while (PrevTo != OrigTo) {
-          InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
+          auto *Del = cast<InsertValueInst>(PrevTo);
           PrevTo = Del->getAggregateOperand();
           Del->eraseFromParent();
         }
@@ -2809,13 +2809,13 @@
   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
          "Invalid indices for type?");
-  if (Constant *C = dyn_cast<Constant>(V)) {
+  if (auto *C = dyn_cast<Constant>(V)) {
     C = C->getAggregateElement(idx_range[0]);
     if (!C) return nullptr;
     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
   }
-  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
+  if (auto *I = dyn_cast<InsertValueInst>(V)) {
     // Loop the indices for the insertvalue instruction in parallel with the
     // requested indices
     const unsigned *req_idx = idx_range.begin();
@@ -2855,7 +2855,7 @@
                              InsertBefore);
   }
-  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
+  if (auto *I = dyn_cast<ExtractValueInst>(V)) {
     // If we're extracting a value from an aggregate that was extracted from
     // something else, we can extract from that something else directly instead.
     // However, we will need to chain I's indices with the requested indices.
@@ -2896,7 +2896,7 @@
     if (Ptr->getType()->isVectorTy())
       break;
-    if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
+    if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
       // If one of the values we have visited is an addrspacecast, then
       // the pointer type of this GEP may be different from the type
      // of the Ptr parameter which was passed to this function. This
@@ -2913,7 +2913,7 @@
     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
       Ptr = cast<Operator>(Ptr)->getOperand(0);
-    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
+    } else if (auto *GA = dyn_cast<GlobalAlias>(Ptr)) {
       if (GA->isInterposable())
         break;
       Ptr = GA->getAliasee();
@@ -2931,13 +2931,13 @@
     return false;
   // Make sure the index-ee is a pointer to array of i8.
-  ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
+  auto *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
   if (!AT || !AT->getElementType()->isIntegerTy(8))
     return false;
   // Check to make sure that the first operand of the GEP is an integer and
   // has value 0 so that we are sure we're indexing into the initializer.
-  const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
+  const auto *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
   if (!FirstIdx || !FirstIdx->isZero())
     return false;
@@ -2956,7 +2956,7 @@
   // If the value is a GEP instruction or constant expression, treat it as an
   // offset.
-  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+  if (const auto *GEP = dyn_cast<GEPOperator>(V)) {
     // The GEP operator should be based on a pointer to string constant, and is
     // indexing into the string constant.
     if (!isGEPBasedOnPointerToString(GEP))
@@ -2966,7 +2966,7 @@
     // into the array. If this occurs, we can't say anything meaningful about
     // the string.
     uint64_t StartIdx = 0;
-    if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
+    if (const auto *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
       StartIdx = CI->getZExtValue();
     else
       return false;
@@ -2977,7 +2977,7 @@
   // The GEP instruction, constant or instruction, must reference a global
   // variable that is a constant and is initialized. The referenced constant
   // initializer is the array that we'll use for optimization.
-  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
+  const auto *GV = dyn_cast<GlobalVariable>(V);
   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
     return false;
@@ -3028,7 +3028,7 @@
   // If this is a PHI node, there are two cases: either we have already seen it
   // or we haven't.
-  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
+  if (const auto *PN = dyn_cast<PHINode>(V)) {
     if (!PHIs.insert(PN).second)
       return ~0ULL;  // already in the set.
@@ -3050,7 +3050,7 @@
   }
   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
-  if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
+  if (const auto *SI = dyn_cast<SelectInst>(V)) {
     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
     if (Len1 == 0) return 0;
     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
@@ -3113,12 +3113,12 @@
   if (!V->getType()->isPointerTy())
     return V;
   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
-    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
       V = GEP->getPointerOperand();
     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
       V = cast<Operator>(V)->getOperand(0);
-    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+    } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
       if (GA->isInterposable())
         return V;
       V = GA->getAliasee();
@@ -3130,7 +3130,7 @@
     }
     // See if InstructionSimplify knows any relevant tricks.
-    if (Instruction *I = dyn_cast<Instruction>(V))
+    if (auto *I = dyn_cast<Instruction>(V))
       // TODO: Acquire a DominatorTree and AssumptionCache and use them.
       if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) {
         V = Simplified;
@@ -3157,13 +3157,13 @@
     if (!Visited.insert(P).second)
       continue;
-    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
+    if (auto *SI = dyn_cast<SelectInst>(P)) {
       Worklist.push_back(SI->getTrueValue());
       Worklist.push_back(SI->getFalseValue());
       continue;
     }
-    if (PHINode *PN = dyn_cast<PHINode>(P)) {
+    if (auto *PN = dyn_cast<PHINode>(P)) {
       // If this PHI changes the underlying object in every iteration of the
       // loop, don't look through it. Consider:
       //   int **A;
@@ -3188,7 +3188,7 @@
 /// Return true if the only users of this pointer are lifetime markers.
 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
   for (const User *U : V->users()) {
-    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
+    const auto *II = dyn_cast<IntrinsicInst>(U);
     if (!II) return false;
     if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
@@ -3201,12 +3201,12 @@
 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                         const Instruction *CtxI,
                                         const DominatorTree *DT) {
-  const Operator *Inst = dyn_cast<Operator>(V);
+  const auto *Inst = dyn_cast<Operator>(V);
   if (!Inst)
     return false;
   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
-    if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
+    if (auto *C = dyn_cast<Constant>(Inst->getOperand(i)))
       if (C->canTrap())
         return false;
@@ -3241,7 +3241,7 @@
     return false;
   }
   case Instruction::Load: {
-    const LoadInst *LI = cast<LoadInst>(Inst);
+    const auto *LI = cast<LoadInst>(Inst);
     if (!LI->isUnordered() ||
         // Speculative load may create a race that did not exist in the source.
         LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
@@ -3253,7 +3253,7 @@
                                      LI->getAlignment(), DL, CtxI, DT);
   }
   case Instruction::Call: {
-    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
+    if (const auto *II = dyn_cast<IntrinsicInst>(Inst)) {
       switch (II->getIntrinsicID()) {
       // These synthetic intrinsics have no side-effects and just mark
       // information about their operands.
@@ -3342,18 +3342,18 @@
   if (isa<AllocaInst>(V))
     return true;
   // A byval, inalloca, or nonnull argument is never null.
-  if (const Argument *A = dyn_cast<Argument>(V))
+  if (const auto *A = dyn_cast<Argument>(V))
     return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
   // A global variable in address space 0 is non null unless extern weak
   // or an absolute symbol reference. Other address spaces may have null as a
   // valid address for a global, so we can't assume anything.
-  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
+  if (const auto *GV = dyn_cast<GlobalValue>(V))
     return !GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
            GV->getType()->getAddressSpace() == 0;
   // A Load tagged with nonnull metadata is never null.
-  if (const LoadInst *LI = dyn_cast<LoadInst>(V))
+  if (const auto *LI = dyn_cast<LoadInst>(V))
     return LI->getMetadata(LLVMContext::MD_nonnull);
   if (auto CS = ImmutableCallSite(V))
@@ -3383,7 +3383,7 @@
       continue;
     for (auto *CmpU : U->users()) {
-      if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
+      if (const auto *BI = dyn_cast<BranchInst>(CmpU)) {
         assert(BI->isConditional() && "uses a comparison!");
         BasicBlock *NonNullSuccessor =
@@ -3629,15 +3629,15 @@
   // An atomic operation isn't guaranteed to return in a reasonable amount of
   // time because it's possible for another thread to interfere with it for an
   // arbitrary length of time, but programs aren't allowed to rely on that.
-  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
+  if (const auto *LI = dyn_cast<LoadInst>(I))
     return !LI->isVolatile();
-  if (const StoreInst *SI = dyn_cast<StoreInst>(I))
+  if (const auto *SI = dyn_cast<StoreInst>(I))
     return !SI->isVolatile();
-  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
+  if (const auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
     return !CXI->isVolatile();
-  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
+  if (const auto *RMWI = dyn_cast<AtomicRMWInst>(I))
     return !RMWI->isVolatile();
-  if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
+  if (const auto *MII = dyn_cast<MemIntrinsic>(I))
     return !MII->isVolatile();
   // If there is no successor, then execution can't transfer to it.
@@ -3816,7 +3816,7 @@
     // Mark poison that propagates from I through uses of I.
     if (YieldsPoison.count(&I)) {
       for (const User *User : I.users()) {
-        const Instruction *UserI = cast<Instruction>(User);
+        const auto *UserI = cast<Instruction>(User);
         if (propagatesFullPoison(UserI))
           YieldsPoison.insert(User);
       }
@@ -4052,8 +4052,8 @@
 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
                               Instruction::CastOps *CastOp) {
-  CastInst *CI = dyn_cast<CastInst>(V1);
-  Constant *C = dyn_cast<Constant>(V2);
+  auto *CI = dyn_cast<CastInst>(V1);
+  auto *C = dyn_cast<Constant>(V2);
   if (!CI)
     return nullptr;
   *CastOp = CI->getOpcode();
@@ -4112,10 +4112,10 @@
 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                              Instruction::CastOps *CastOp) {
-  SelectInst *SI = dyn_cast<SelectInst>(V);
+  auto *SI = dyn_cast<SelectInst>(V);
   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
-  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
+  auto *CmpI = dyn_cast<CmpInst>(SI->getCondition());
   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
   CmpInst::Predicate Pred = CmpI->getPredicate();
Index: lib/Analysis/VectorUtils.cpp
===================================================================
--- lib/Analysis/VectorUtils.cpp
+++ lib/Analysis/VectorUtils.cpp
@@ -123,7 +123,7 @@
 /// getGEPInductionOperand. However, if there is some other non-loop-invariant
 /// operand, it returns that instead.
 Value *llvm::stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp) {
-  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
+  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
   if (!GEP)
     return Ptr;
@@ -142,7 +142,7 @@
 Value *llvm::getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
   Value *UniqueCast = nullptr;
   for (User *U : Ptr->users()) {
-    CastInst *CI = dyn_cast<CastInst>(U);
+    auto *CI = dyn_cast<CastInst>(U);
     if (CI && CI->getType() == Ty) {
       if (!UniqueCast)
         UniqueCast = CI;
@@ -173,10 +173,10 @@
   if (Ptr != OrigPtr)
     // Strip off casts.
-    while (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V))
+    while (const auto *C = dyn_cast<SCEVCastExpr>(V))
       V = C->getOperand();
-  const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
+  const auto *S = dyn_cast<SCEVAddRecExpr>(V);
   if (!S)
     return nullptr;
@@ -187,7 +187,7 @@
   // Strip off the size of access multiplication if we are still analyzing the
   // pointer.
   if (OrigPtr == Ptr) {
-    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
+    if (const auto *M = dyn_cast<SCEVMulExpr>(V)) {
       if (M->getOperand(0)->getSCEVType() != scConstant)
         return nullptr;
@@ -206,13 +206,13 @@
   // Strip off casts.
   Type *StripedOffRecurrenceCast = nullptr;
-  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V)) {
+  if (const auto *C = dyn_cast<SCEVCastExpr>(V)) {
     StripedOffRecurrenceCast = C->getType();
     V = C->getOperand();
   }
   // Look for the loop invariant symbolic value.
-  const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
+  const auto *U = dyn_cast<SCEVUnknown>(V);
   if (!U)
     return nullptr;
@@ -233,15 +233,15 @@
 /// from the vector.
 Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
   assert(V->getType()->isVectorTy() && "Not looking at a vector?");
-  VectorType *VTy = cast<VectorType>(V->getType());
+  auto *VTy = cast<VectorType>(V->getType());
   unsigned Width = VTy->getNumElements();
   if (EltNo >= Width)  // Out of range access.
     return UndefValue::get(VTy->getElementType());
-  if (Constant *C = dyn_cast<Constant>(V))
+  if (auto *C = dyn_cast<Constant>(V))
     return C->getAggregateElement(EltNo);
-  if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
+  if (auto *III = dyn_cast<InsertElementInst>(V)) {
     // If this is an insert to a variable element, we don't know what it is.
     if (!isa<ConstantInt>(III->getOperand(2)))
       return nullptr;
@@ -257,7 +257,7 @@
     return findScalarElement(III->getOperand(0), EltNo);
   }
-  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
+  if (auto *SVI = dyn_cast<ShuffleVectorInst>(V)) {
     unsigned LHSWidth = SVI->getOperand(0)->getType()->getVectorNumElements();
     int InEl = SVI->getMaskValue(EltNo);
     if (InEl < 0)
@@ -360,7 +360,7 @@
     // Non-instructions terminate a chain successfully.
     if (!isa<Instruction>(Val))
       continue;
-    Instruction *I = cast<Instruction>(Val);
+    auto *I = cast<Instruction>(Val);
     // If we encounter a type that is larger than 64 bits, we can't represent
     // it so bail out.
@@ -450,7 +450,7 @@
 /// \returns \p I after propagating metadata from \p VL.
 Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
-  Instruction *I0 = cast<Instruction>(VL[0]);
+  auto *I0 = cast<Instruction>(VL[0]);
   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
   I0->getAllMetadataOtherThanDebugLoc(Metadata);
@@ -461,7 +461,7 @@
     MDNode *MD = I0->getMetadata(Kind);
     for (int J = 1, E = VL.size(); MD && J != E; ++J) {
-      const Instruction *IJ = cast<Instruction>(VL[J]);
+      const auto *IJ = cast<Instruction>(VL[J]);
       MDNode *IMD = IJ->getMetadata(Kind);
       switch (Kind) {
       case LLVMContext::MD_tbaa:
Index: lib/AsmParser/LLParser.cpp
===================================================================
--- lib/AsmParser/LLParser.cpp
+++ lib/AsmParser/LLParser.cpp
@@ -132,7 +132,7 @@
        VI != VE; ++VI)
     B.merge(NumberedAttrBuilders[*VI]);
-  if (Function *Fn = dyn_cast<Function>(V)) {
+  if (auto *Fn = dyn_cast<Function>(V)) {
     AttributeSet AS = Fn->getAttributes();
     AttrBuilder FnAttrs(AS.getFnAttributes(), AttributeSet::FunctionIndex);
     AS = AS.removeAttributes(Context, AttributeSet::FunctionIndex,
@@ -152,7 +152,7 @@
                              AttributeSet::FunctionIndex, FnAttrs));
     Fn->setAttributes(AS);
-  } else if (CallInst *CI = dyn_cast<CallInst>(V)) {
+  } else if (auto *CI = dyn_cast<CallInst>(V)) {
     AttributeSet AS = CI->getAttributes();
     AttrBuilder FnAttrs(AS.getFnAttributes(), AttributeSet::FunctionIndex);
     AS = AS.removeAttributes(Context, AttributeSet::FunctionIndex,
@@ -163,7 +163,7 @@
                              AttributeSet::FunctionIndex, FnAttrs));
     CI->setAttributes(AS);
-  } else if (InvokeInst *II = dyn_cast<InvokeInst>(V)) {
+  } else if (auto *II = dyn_cast<InvokeInst>(V)) {
     AttributeSet AS = II->getAttributes();
     AttrBuilder FnAttrs(AS.getFnAttributes(), AttributeSet::FunctionIndex);
     AS = AS.removeAttributes(Context, AttributeSet::FunctionIndex,
@@ -1171,14 +1171,14 @@
 /// exists but does not have the right type.
 GlobalValue *LLParser::GetGlobalVal(const std::string &Name, Type *Ty,
                                     LocTy Loc) {
-  PointerType *PTy = dyn_cast<PointerType>(Ty);
+  auto *PTy = dyn_cast<PointerType>(Ty);
   if (!PTy) {
     Error(Loc, "global variable reference must have pointer type");
     return nullptr;
   }
   // Look this name up in the normal function symbol table.
-  GlobalValue *Val =
+  auto *Val =
       cast_or_null<GlobalValue>(M->getValueSymbolTable().lookup(Name));
   // If this is a forward reference for the value, see if we already created a
@@ -1204,7 +1204,7 @@
 }
 GlobalValue *LLParser::GetGlobalVal(unsigned ID, Type *Ty, LocTy Loc) {
-  PointerType *PTy = dyn_cast<PointerType>(Ty);
+  auto *PTy = dyn_cast<PointerType>(Ty);
   if (!PTy) {
     Error(Loc, "global variable reference must have pointer type");
     return nullptr;
@@ -2365,7 +2365,7 @@
   if (!Entry.first)
     Entry.first = StructType::create(Context, Name);
-  StructType *STy = cast<StructType>(Entry.first);
+  auto *STy = cast<StructType>(Entry.first);
   SmallVector Body;
   if (ParseStructBody(Body) ||
@@ -4511,7 +4511,7 @@
     return false;
   case ValID::t_ConstantStruct:
   case ValID::t_PackedConstantStruct:
-    if (StructType *ST = dyn_cast<StructType>(Ty)) {
+    if (auto *ST = dyn_cast<StructType>(Ty)) {
       if (ST->getNumElements() != ID.UIntVal)
         return Error(ID.Loc,
                      "initializer with struct type has wrong # elements");
@@ -5170,7 +5170,7 @@
   BasicBlock *Op1, *Op2;
   if (ParseTypeAndValue(Op0, Loc, PFS)) return true;
-  if (BasicBlock *BB = dyn_cast<BasicBlock>(Op0)) {
+  if (auto *BB = dyn_cast<BasicBlock>(Op0)) {
     Inst = BranchInst::Create(BB);
     return false;
   }
@@ -5306,7 +5306,7 @@
   // If RetType is a non-function pointer type, then this is the short syntax
   // for the call, which means that RetType is just the return type. Infer the
   // rest of the function argument types from the arguments that are present.
-  FunctionType *Ty = dyn_cast<FunctionType>(RetType);
+  auto *Ty = dyn_cast<FunctionType>(RetType);
   if (!Ty) {
     // Pull out the types of all of the arguments...
     std::vector<Type*> ParamTypes;
@@ -5855,7 +5855,7 @@
       Error(VLoc, "'filter' clause has an invalid type");
   }
-  Constant *CV = dyn_cast<Constant>(V);
+  auto *CV = dyn_cast<Constant>(V);
   if (!CV)
     return Error(VLoc, "clause argument must be a constant");
   LP->addClause(CV);
@@ -5910,7 +5910,7 @@
   // If RetType is a non-function pointer type, then this is the short syntax
   // for the call, which means that RetType is just the return type. Infer the
   // rest of the function argument types from the arguments that are present.
-  FunctionType *Ty = dyn_cast<FunctionType>(RetType);
+  auto *Ty = dyn_cast<FunctionType>(RetType);
   if (!Ty) {
     // Pull out the types of all of the arguments...
     std::vector<Type*> ParamTypes;
@@ -6269,7 +6269,7 @@
     return true;
   Type *BaseType = Ptr->getType();
-  PointerType *BasePointerType = dyn_cast<PointerType>(BaseType->getScalarType());
+  auto *BasePointerType = dyn_cast<PointerType>(BaseType->getScalarType());
   if (!BasePointerType)
     return Error(Loc, "base of getelementptr must be a pointer");
Index: lib/Bitcode/Reader/BitcodeReader.cpp
===================================================================
--- lib/Bitcode/Reader/BitcodeReader.cpp
+++ lib/Bitcode/Reader/BitcodeReader.cpp
@@ -1171,7 +1171,7 @@
   // Handle constants and non-constants (e.g. instrs) differently for
   // efficiency.
-  if (Constant *PHC = dyn_cast<Constant>(&*OldV)) {
+  if (auto *PHC = dyn_cast<Constant>(&*OldV)) {
     ResolveConstants.push_back(std::make_pair(PHC, Idx));
     OldV = V;
   } else {
@@ -1257,7 +1257,7 @@
     // Otherwise, we have a constant that uses the placeholder. Replace that
     // constant with a new constant that has *all* placeholder uses updated.
-    Constant *UserC = cast<Constant>(U);
+    auto *UserC = cast<Constant>(U);
     for (User::op_iterator I = UserC->op_begin(), E = UserC->op_end(); I != E;
          ++I) {
       Value *NewOp;
@@ -1282,9 +1282,9 @@
     // Make the new constant.
     Constant *NewC;
-    if (ConstantArray *UserCA = dyn_cast<ConstantArray>(UserC)) {
+    if (auto *UserCA = dyn_cast<ConstantArray>(UserC)) {
       NewC = ConstantArray::get(UserCA->getType(), NewOps);
-    } else if (ConstantStruct *UserCS = dyn_cast<ConstantStruct>(UserC)) {
+    } else if (auto *UserCS = dyn_cast<ConstantStruct>(UserC)) {
       NewC = ConstantStruct::get(UserCS->getType(), NewOps);
     } else if (isa<ConstantVector>(UserC)) {
       NewC = ConstantVector::get(NewOps);
@@ -2062,7 +2062,7 @@
       return error("Invalid TYPE table");
 
     // Check to see if this was forward referenced, if so fill in the temp.
-    StructType *Res = cast_or_null<StructType>(TypeList[NumRecords]);
+    auto *Res = cast_or_null<StructType>(TypeList[NumRecords]);
     if (Res) {
       Res->setName(TypeName);
       TypeList[NumRecords] = nullptr;
@@ -2091,7 +2091,7 @@
       return error("Invalid TYPE table");
 
     // Check to see if this was forward referenced, if so fill in the temp.
-    StructType *Res = cast_or_null<StructType>(TypeList[NumRecords]);
+    auto *Res = cast_or_null<StructType>(TypeList[NumRecords]);
     if (Res) {
       Res->setName(TypeName);
       TypeList[NumRecords] = nullptr;
@@ -2298,7 +2298,7 @@
       // before the start of the identification or module block, which was
       // historically always the start of the regular bitcode header.
       uint64_t FuncWordOffset = Record[1] - 1;
-      Function *F = dyn_cast<Function>(GO);
+      auto *F = dyn_cast<Function>(GO);
       assert(F);
       uint64_t FuncBitOffset = FuncWordOffset * 32;
       DeferredFunctionInfo[F] = FuncBitOffset + FuncBitcodeOffsetDelta;
@@ -3149,7 +3149,7 @@
       // Not ready to resolve this yet, it requires something later in the file.
       GlobalInits.push_back(GlobalInitWorklist.back());
     } else {
-      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
+      if (auto *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
         GlobalInitWorklist.back().first->setInitializer(C);
       else
         return error("Expected a constant");
@@ -3162,7 +3162,7 @@
     if (ValID >= ValueList.size()) {
       IndirectSymbolInits.push_back(IndirectSymbolInitWorklist.back());
     } else {
-      Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]);
+      auto *C = dyn_cast_or_null<Constant>(ValueList[ValID]);
       if (!C)
         return error("Expected a constant");
       GlobalIndirectSymbol *GIS = IndirectSymbolInitWorklist.back().first;
@@ -3178,7 +3178,7 @@
     if (ValID >= ValueList.size()) {
       FunctionPrefixes.push_back(FunctionPrefixWorklist.back());
     } else {
-      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
+      if (auto *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
         FunctionPrefixWorklist.back().first->setPrefixData(C);
       else
         return error("Expected a constant");
@@ -3191,7 +3191,7 @@
     if (ValID >= ValueList.size()) {
       FunctionPrologues.push_back(FunctionPrologueWorklist.back());
     } else {
-      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
+      if (auto *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
         FunctionPrologueWorklist.back().first->setPrologueData(C);
       else
         return error("Expected a constant");
@@ -3204,7 +3204,7 @@
     if (ValID >= ValueList.size()) {
       FunctionPersonalityFns.push_back(FunctionPersonalityFnWorklist.back());
     } else {
-      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
+      if (auto *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
         FunctionPersonalityFnWorklist.back().first->setPersonalityFn(C);
       else
         return error("Expected a constant");
@@ -3327,17 +3327,17 @@
     unsigned Size = Record.size();
     SmallVector<Constant*, 16> Elts;
 
-    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
+    if (auto *STy = dyn_cast<StructType>(CurTy)) {
       for (unsigned i = 0; i != Size; ++i)
         Elts.push_back(ValueList.getConstantFwdRef(Record[i],
                                                    STy->getElementType(i)));
       V = ConstantStruct::get(STy, Elts);
-    } else if (ArrayType *ATy = dyn_cast<ArrayType>(CurTy)) {
+    } else if (auto *ATy = dyn_cast<ArrayType>(CurTy)) {
       Type *EltTy = ATy->getElementType();
       for (unsigned i = 0; i != Size; ++i)
         Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy));
       V = ConstantArray::get(ATy, Elts);
-    } else if (VectorType *VTy = dyn_cast<VectorType>(CurTy)) {
+    } else if (auto *VTy = dyn_cast<VectorType>(CurTy)) {
       Type *EltTy = VTy->getElementType();
       for (unsigned i = 0; i != Size; ++i)
         Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy));
@@ -3506,7 +3506,7 @@
       // The selector might be an i1 or an <n x i1>
       // Get the type from the ValueList before getting a forward ref.
-      if (VectorType *VTy = dyn_cast<VectorType>(CurTy))
+      if (auto *VTy = dyn_cast<VectorType>(CurTy))
         if (Value *V = ValueList[Record[0]])
           if (SelectorTy != V->getType())
             SelectorTy = VectorType::get(SelectorTy, VTy->getNumElements());
@@ -3521,7 +3521,7 @@
     case bitc::CST_CODE_CE_EXTRACTELT
         : { // CE_EXTRACTELT: [opty, opval, opty, opval]
       if (Record.size() < 3)
         return error("Invalid record");
-      VectorType *OpTy =
+      auto *OpTy =
           dyn_cast_or_null<VectorType>(getTypeByID(Record[0]));
       if (!OpTy)
         return error("Invalid record");
@@ -3541,7 +3541,7 @@
     }
     case bitc::CST_CODE_CE_INSERTELT
         : { // CE_INSERTELT: [opval, opval, opty, opval]
-      VectorType *OpTy = dyn_cast<VectorType>(CurTy);
+      auto *OpTy = dyn_cast<VectorType>(CurTy);
       if (Record.size() < 3 || !OpTy)
         return error("Invalid record");
       Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy);
@@ -3561,7 +3561,7 @@
       break;
     }
     case bitc::CST_CODE_CE_SHUFFLEVEC: { // CE_SHUFFLEVEC: [opval, opval, opval]
-      VectorType *OpTy = dyn_cast<VectorType>(CurTy);
+      auto *OpTy = dyn_cast<VectorType>(CurTy);
       if (Record.size() < 3 || !OpTy)
         return error("Invalid record");
       Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy);
@@ -3573,8 +3573,8 @@
       break;
     }
     case bitc::CST_CODE_CE_SHUFVEC_EX: { // [opty, opval, opval, opval]
-      VectorType *RTy = dyn_cast<VectorType>(CurTy);
-      VectorType *OpTy =
+      auto *RTy = dyn_cast<VectorType>(CurTy);
+      auto *OpTy =
           dyn_cast_or_null<VectorType>(getTypeByID(Record[0]));
       if (Record.size() < 4 || !RTy || !OpTy)
         return error("Invalid record");
@@ -3620,7 +3620,7 @@
         AsmStr += (char)Record[2+i];
       for (unsigned i = 0; i != ConstStrSize; ++i)
         ConstrStr += (char)Record[3+AsmStrSize+i];
-      PointerType *PTy = cast<PointerType>(CurTy);
+      auto *PTy = cast<PointerType>(CurTy);
       V = InlineAsm::get(cast<FunctionType>(PTy->getElementType()),
                          AsmStr, ConstrStr, HasSideEffects, IsAlignStack);
       break;
@@ -3645,7 +3645,7 @@
         AsmStr += (char)Record[2+i];
       for (unsigned i = 0; i != ConstStrSize; ++i)
         ConstrStr += (char)Record[3+AsmStrSize+i];
-      PointerType *PTy = cast<PointerType>(CurTy);
+      auto *PTy = cast<PointerType>(CurTy);
       V = InlineAsm::get(cast<FunctionType>(PTy->getElementType()),
                          AsmStr, ConstrStr, HasSideEffects, IsAlignStack,
                          InlineAsm::AsmDialect(AsmDialect));
@@ -3657,7 +3657,7 @@
       Type *FnTy = getTypeByID(Record[0]);
       if (!FnTy)
         return error("Invalid record");
-      Function *Fn =
+      auto *Fn =
         dyn_cast_or_null<Function>(ValueList.getConstantFwdRef(Record[1],FnTy));
       if (!Fn)
         return error("Invalid record");
@@ -4439,7 +4439,7 @@
         // Drop the attachment. This used to be legal, but there's no
         // upgrade path.
         break;
-      MDNode *MD = dyn_cast_or_null<MDNode>(Node);
+      auto *MD = dyn_cast_or_null<MDNode>(Node);
       if (!MD)
         return error("Invalid metadata attachment");
@@ -4847,7 +4847,7 @@
         return error("Invalid record");
 
       // select condition can be either i1 or [N x i1]
-      if (VectorType* vector_type =
+      if (auto * vector_type =
           dyn_cast<VectorType>(Cond->getType())) {
         // expect <n x i1>
         if (vector_type->getElementType() != Type::getInt1Ty(Context))
@@ -5162,7 +5162,7 @@
       SwitchInst *SI = SwitchInst::Create(Cond, Default, NumCases);
       InstructionList.push_back(SI);
       for (unsigned i = 0, e = NumCases; i != e; ++i) {
-        ConstantInt *CaseVal =
+        auto *CaseVal =
           dyn_cast_or_null<ConstantInt>(getFnValueByID(Record[3+i*2], OpTy));
         BasicBlock *DestBB = getBasicBlock(Record[1+3+i*2]);
         if (!CaseVal || !DestBB) {
@@ -5215,7 +5215,7 @@
       if (getValueTypePair(Record, OpNum, NextValueNo, Callee))
         return error("Invalid record");
 
-      PointerType *CalleeTy = dyn_cast<PointerType>(Callee->getType());
+      auto *CalleeTy = dyn_cast<PointerType>(Callee->getType());
       if (!CalleeTy)
         return error("Callee is not a pointer");
       if (!FTy) {
@@ -5605,7 +5605,7 @@
       if (getValueTypePair(Record, OpNum, NextValueNo, Callee))
         return error("Invalid record");
 
-      PointerType *OpTy = dyn_cast<PointerType>(Callee->getType());
+      auto *OpTy = dyn_cast<PointerType>(Callee->getType());
       if (!OpTy)
         return error("Callee is not a pointer type");
       if (!FTy) {
@@ -5730,7 +5730,7 @@
     return error("Operand bundles found with no consumer");
 
   // Check the function list for unresolved values.
-  if (Argument *A = dyn_cast<Argument>(ValueList.back())) {
+  if (auto *A = dyn_cast<Argument>(ValueList.back())) {
     if (!A->getParent()) {
       // We found at least one unresolved value.  Nuke them all to avoid leaks.
       for (unsigned i = ModuleValueListSize, e = ValueList.size(); i != e; ++i){
@@ -5777,7 +5777,7 @@
 //===----------------------------------------------------------------------===//
 
 Error BitcodeReader::materialize(GlobalValue *GV) {
-  Function *F = dyn_cast<Function>(GV);
+  auto *F = dyn_cast<Function>(GV);
   // If it's not a function or is already material, ignore the request.
   if (!F || !F->isMaterializable())
     return Error::success();
@@ -5810,7 +5810,7 @@
          UI != UE;) {
       User *U = *UI;
       ++UI;
-      if (CallInst *CI = dyn_cast<CallInst>(U))
+      if (auto *CI = dyn_cast<CallInst>(U))
         UpgradeIntrinsicCall(CI, I.second);
     }
   }
@@ -5864,7 +5864,7 @@
   // with calls to the old function.
   for (auto &I : UpgradedIntrinsics) {
     for (auto *U : I.first->users()) {
-      if (CallInst *CI = dyn_cast<CallInst>(U))
+      if (auto *CI = dyn_cast<CallInst>(U))
         UpgradeIntrinsicCall(CI, I.second);
     }
     if (!I.first->use_empty())
Index: lib/Bitcode/Writer/BitcodeWriter.cpp
===================================================================
--- lib/Bitcode/Writer/BitcodeWriter.cpp
+++ lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -851,7 +851,7 @@
       TypeVals.push_back(cast<IntegerType>(T)->getBitWidth());
       break;
     case Type::PointerTyID: {
-      PointerType *PTy = cast<PointerType>(T);
+      auto *PTy = cast<PointerType>(T);
       // POINTER: [pointee type, address space]
       Code = bitc::TYPE_CODE_POINTER;
       TypeVals.push_back(VE.getTypeID(PTy->getElementType()));
@@ -861,7 +861,7 @@
       break;
     }
     case Type::FunctionTyID: {
-      FunctionType *FT = cast<FunctionType>(T);
+      auto *FT = cast<FunctionType>(T);
       // FUNCTION: [isvararg, retty, paramty x N]
       Code = bitc::TYPE_CODE_FUNCTION;
       TypeVals.push_back(FT->isVarArg());
@@ -872,7 +872,7 @@
       break;
     }
     case Type::StructTyID: {
-      StructType *ST = cast<StructType>(T);
+      auto *ST = cast<StructType>(T);
       // STRUCT: [ispacked, eltty x N]
       TypeVals.push_back(ST->isPacked());
       // Output all of the element types.
@@ -899,7 +899,7 @@
       break;
     }
     case Type::ArrayTyID: {
-      ArrayType *AT = cast<ArrayType>(T);
+      auto *AT = cast<ArrayType>(T);
       // ARRAY: [numelts, eltty]
       Code = bitc::TYPE_CODE_ARRAY;
       TypeVals.push_back(AT->getNumElements());
@@ -908,7 +908,7 @@
       break;
     }
     case Type::VectorTyID: {
-      VectorType *VT = cast<VectorType>(T);
+      auto *VT = cast<VectorType>(T);
       // VECTOR [numelts, eltty]
       Code = bitc::TYPE_CODE_VECTOR;
       TypeVals.push_back(VT->getNumElements());
@@ -1847,7 +1847,7 @@
 #include "llvm/IR/Metadata.def"
 
   for (const Metadata *MD : MDs) {
-    if (const MDNode *N = dyn_cast<MDNode>(MD)) {
+    if (const auto *N = dyn_cast<MDNode>(MD)) {
       assert(N->isResolved() && "Expected forward references to be resolved");
 
       switch (N->getMetadataID()) {
@@ -2061,7 +2061,7 @@
       Record.clear();
     }
 
-    if (const InlineAsm *IA = dyn_cast<InlineAsm>(V)) {
+    if (const auto *IA = dyn_cast<InlineAsm>(V)) {
       Record.push_back(unsigned(IA->hasSideEffects()) |
                        unsigned(IA->isAlignStack()) << 1 |
                        unsigned(IA->getDialect()&1) << 2);
@@ -2079,14 +2079,14 @@
       Record.clear();
       continue;
     }
-    const Constant *C = cast<Constant>(V);
+    const auto *C = cast<Constant>(V);
     unsigned Code = -1U;
     unsigned AbbrevToUse = 0;
     if (C->isNullValue()) {
       Code = bitc::CST_CODE_NULL;
     } else if (isa<UndefValue>(C)) {
       Code = bitc::CST_CODE_UNDEF;
-    } else if (const ConstantInt *IV = dyn_cast<ConstantInt>(C)) {
+    } else if (const auto *IV = dyn_cast<ConstantInt>(C)) {
       if (IV->getBitWidth() <= 64) {
         uint64_t V = IV->getSExtValue();
         emitSignedInt64(Record, V);
@@ -2104,7 +2104,7 @@
       }
         Code = bitc::CST_CODE_WIDE_INTEGER;
       }
-    } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
+    } else if (const auto *CFP = dyn_cast<ConstantFP>(C)) {
       Code = bitc::CST_CODE_FLOAT;
       Type *Ty = CFP->getType();
       if (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy()) {
@@ -2126,7 +2126,7 @@
       }
     } else if (isa<ConstantDataSequential>(C) &&
                cast<ConstantDataSequential>(C)->isString()) {
-      const ConstantDataSequential *Str = cast<ConstantDataSequential>(C);
+      const auto *Str = cast<ConstantDataSequential>(C);
       // Emit constant strings specially.
       unsigned NumElts = Str->getNumElements();
       // If this is a null-terminated string, use the denser CSTRING encoding.
@@ -2151,7 +2151,7 @@
         AbbrevToUse = CString6Abbrev;
       else if (isCStr7)
         AbbrevToUse = CString7Abbrev;
-    } else if (const ConstantDataSequential *CDS =
+    } else if (const auto *CDS =
                   dyn_cast<ConstantDataSequential>(C)) {
       Code = bitc::CST_CODE_DATA;
       Type *EltTy = CDS->getType()->getElementType();
@@ -2168,7 +2168,7 @@
       for (const Value *Op : C->operands())
         Record.push_back(VE.getValueID(Op));
       AbbrevToUse = AggregateAbbrev;
-    } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+    } else if (const auto *CE = dyn_cast<ConstantExpr>(C)) {
       switch (CE->getOpcode()) {
       default:
         if (Instruction::isCast(CE->getOpcode())) {
@@ -2247,7 +2247,7 @@
         Record.push_back(CE->getPredicate());
         break;
       }
-    } else if (const BlockAddress *BA = dyn_cast<BlockAddress>(C)) {
+    } else if (const auto *BA = dyn_cast<BlockAddress>(C)) {
       Code = bitc::CST_CODE_BLOCKADDRESS;
       Record.push_back(VE.getTypeID(BA->getFunction()->getType()));
       Record.push_back(VE.getValueID(BA->getFunction()));
@@ -2374,7 +2374,7 @@
   case Instruction::ExtractValue: {
     Code = bitc::FUNC_CODE_INST_EXTRACTVAL;
     pushValueAndType(I.getOperand(0), InstID, Vals);
-    const ExtractValueInst *EVI = cast<ExtractValueInst>(&I);
+    const auto *EVI = cast<ExtractValueInst>(&I);
     Vals.append(EVI->idx_begin(), EVI->idx_end());
     break;
   }
@@ -2382,7 +2382,7 @@
     Code = bitc::FUNC_CODE_INST_INSERTVAL;
     pushValueAndType(I.getOperand(0), InstID, Vals);
     pushValueAndType(I.getOperand(1), InstID, Vals);
-    const InsertValueInst *IVI = cast<InsertValueInst>(&I);
+    const auto *IVI = cast<InsertValueInst>(&I);
     Vals.append(IVI->idx_begin(), IVI->idx_end());
     break;
   }
@@ -2440,7 +2440,7 @@
 
   case Instruction::Br: {
     Code = bitc::FUNC_CODE_INST_BR;
-    const BranchInst &II = cast<BranchInst>(I);
+    const auto &II = cast<BranchInst>(I);
    Vals.push_back(VE.getValueID(II.getSuccessor(0)));
    if (II.isConditional()) {
      Vals.push_back(VE.getValueID(II.getSuccessor(1)));
@@ -2451,7 +2451,7 @@
 
   case Instruction::Switch: {
     Code = bitc::FUNC_CODE_INST_SWITCH;
-    const SwitchInst &SI = cast<SwitchInst>(I);
+    const auto &SI = cast<SwitchInst>(I);
     Vals.push_back(VE.getTypeID(SI.getCondition()->getType()));
     pushValue(SI.getCondition(), InstID, Vals);
     Vals.push_back(VE.getValueID(SI.getDefaultDest()));
@@ -2471,7 +2471,7 @@
     break;
 
   case Instruction::Invoke: {
-    const InvokeInst *II = cast<InvokeInst>(&I);
+    const auto *II = cast<InvokeInst>(&I);
     const Value *Callee = II->getCalledValue();
     FunctionType *FTy = II->getFunctionType();
 
@@ -2552,7 +2552,7 @@
     break;
 
   case Instruction::PHI: {
-    const PHINode &PN = cast<PHINode>(I);
+    const auto &PN = cast<PHINode>(I);
     Code = bitc::FUNC_CODE_INST_PHI;
     // With the newer instruction encoding, forward references could give
     // negative valued IDs.  This is most common for PHIs, so we use
@@ -2570,7 +2570,7 @@
   }
 
   case Instruction::LandingPad: {
-    const LandingPadInst &LP = cast<LandingPadInst>(I);
+    const auto &LP = cast<LandingPadInst>(I);
     Code = bitc::FUNC_CODE_INST_LANDINGPAD;
     Vals.push_back(VE.getTypeID(LP.getType()));
     Vals.push_back(LP.isCleanup());
@@ -2587,7 +2587,7 @@
 
   case Instruction::Alloca: {
     Code = bitc::FUNC_CODE_INST_ALLOCA;
-    const AllocaInst &AI = cast<AllocaInst>(I);
+    const auto &AI = cast<AllocaInst>(I);
     Vals.push_back(VE.getTypeID(AI.getAllocatedType()));
     Vals.push_back(VE.getTypeID(I.getOperand(0)->getType()));
     Vals.push_back(VE.getValueID(I.getOperand(0))); // size.
@@ -2664,7 +2664,7 @@
     Vals.push_back(getEncodedSynchScope(cast<FenceInst>(I).getSynchScope()));
     break;
   case Instruction::Call: {
-    const CallInst &CI = cast<CallInst>(I);
+    const auto &CI = cast<CallInst>(I);
     FunctionType *FTy = CI.getFunctionType();
 
     if (CI.hasOperandBundles())
@@ -2800,7 +2800,7 @@
     unsigned AbbrevToUse = VST_ENTRY_8_ABBREV;
     NameVals.push_back(VE.getValueID(Name.getValue()));
 
-    Function *F = dyn_cast<Function>(Name.getValue());
+    auto *F = dyn_cast<Function>(Name.getValue());
     if (!F) {
       // If value is an alias, need to get the aliased base object to
       // see if it is a function.
@@ -3270,7 +3270,7 @@
     const Function &F) {
   NameVals.push_back(ValueID);
 
-  FunctionSummary *FS = cast<FunctionSummary>(Summary);
+  auto *FS = cast<FunctionSummary>(Summary);
   NameVals.push_back(getEncodedGVSummaryFlags(FS->flags()));
   NameVals.push_back(FS->instCount());
   NameVals.push_back(FS->refs().size());
@@ -3319,7 +3319,7 @@
     }
     auto *Summary = Summaries->second.front().get();
     NameVals.push_back(VE.getValueID(&V));
-    GlobalVarSummary *VS = cast<GlobalVarSummary>(Summary);
+    auto *VS = cast<GlobalVarSummary>(Summary);
     NameVals.push_back(getEncodedGVSummaryFlags(VS->flags()));
 
     unsigned SizeBeforeRefs = NameVals.size();
@@ -3428,7 +3428,7 @@
     auto AliaseeId = VE.getValueID(Aliasee);
     NameVals.push_back(AliasId);
     auto *Summary = Index->getGlobalValueSummary(A);
-    AliasSummary *AS = cast<AliasSummary>(Summary);
+    auto *AS = cast<AliasSummary>(Summary);
     NameVals.push_back(getEncodedGVSummaryFlags(AS->flags()));
     NameVals.push_back(AliaseeId);
     Stream.EmitRecord(bitc::FS_ALIAS, NameVals, FSAliasAbbrev);
Index: lib/Bitcode/Writer/ValueEnumerator.cpp
===================================================================
--- lib/Bitcode/Writer/ValueEnumerator.cpp
+++ lib/Bitcode/Writer/ValueEnumerator.cpp
@@ -58,7 +58,7 @@
   if (OM.lookup(V).first)
     return;
 
-  if (const Constant *C = dyn_cast<Constant>(V))
+  if (const auto *C = dyn_cast<Constant>(V))
     if (C->getNumOperands() && !isa<GlobalValue>(C))
       for (const Value *Op : C->operands())
         if (!isa<BasicBlock>(Op) && !isa<GlobalValue>(Op))
@@ -220,7 +220,7 @@
   predictValueUseListOrderImpl(V, F, IDPair.first, OM, Stack);
 
   // Recursive descent into constants.
-  if (const Constant *C = dyn_cast<Constant>(V))
+  if (const auto *C = dyn_cast<Constant>(V))
     if (C->getNumOperands()) // Visit GlobalValues.
       for (const Value *Op : C->operands())
        if (isa<Constant>(Op)) // Visit GlobalValues.
@@ -381,9 +381,9 @@
           EnumerateMetadata(&F, MD->getMetadata());
       }
       EnumerateType(I.getType());
-      if (const CallInst *CI = dyn_cast<CallInst>(&I))
+      if (const auto *CI = dyn_cast<CallInst>(&I))
         EnumerateAttributes(CI->getAttributes());
-      else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I))
+      else if (const auto *II = dyn_cast<InvokeInst>(&I))
         EnumerateAttributes(II->getAttributes());
 
       // Enumerate metadata attached with this instruction.
@@ -789,7 +789,7 @@
   // Enumerate the type of this value.
   EnumerateType(V->getType());
 
-  if (const Constant *C = dyn_cast<Constant>(V)) {
+  if (const auto *C = dyn_cast<Constant>(V)) {
     if (isa<GlobalValue>(C)) {
       // Initializers for globals are handled explicitly elsewhere.
     } else if (C->getNumOperands()) {
@@ -830,7 +830,7 @@
   // If it is a non-anonymous struct, mark the type as being visited so that we
   // don't recursively visit it.  This is safe because we allow forward
   // references of these in the bitcode reader.
-  if (StructType *STy = dyn_cast<StructType>(Ty))
+  if (auto *STy = dyn_cast<StructType>(Ty))
     if (!STy->isLiteral())
       *TypeID = ~0U;
@@ -863,7 +863,7 @@
 
   assert(!isa<MetadataAsValue>(V) && "Unexpected metadata operand");
 
-  const Constant *C = dyn_cast<Constant>(V);
+  const auto *C = dyn_cast<Constant>(V);
   if (!C)
     return;
Index: lib/CodeGen/Analysis.cpp
===================================================================
--- lib/CodeGen/Analysis.cpp
+++ lib/CodeGen/Analysis.cpp
@@ -43,7 +43,7 @@
     return CurIndex;
 
   // Given a struct type, recursively traverse the elements.
-  if (StructType *STy = dyn_cast<StructType>(Ty)) {
+  if (auto *STy = dyn_cast<StructType>(Ty)) {
     for (StructType::element_iterator EB = STy->element_begin(),
                                       EI = EB,
                                       EE = STy->element_end();
@@ -56,7 +56,7 @@
     return CurIndex;
   }
   // Given an array type, recursively traverse the elements.
-  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+  else if (auto *ATy = dyn_cast<ArrayType>(Ty)) {
     Type *EltTy = ATy->getElementType();
     unsigned NumElts = ATy->getNumElements();
     // Compute the Linear offset when jumping one element of the array
@@ -87,7 +87,7 @@
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
   // Given a struct type, recursively traverse the elements.
-  if (StructType *STy = dyn_cast<StructType>(Ty)) {
+  if (auto *STy = dyn_cast<StructType>(Ty)) {
     const StructLayout *SL = DL.getStructLayout(STy);
     for (StructType::element_iterator EB = STy->element_begin(),
                                       EI = EB,
@@ -98,7 +98,7 @@
     return;
   }
   // Given an array type, recursively traverse the elements.
-  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+  if (auto *ATy = dyn_cast<ArrayType>(Ty)) {
     Type *EltTy = ATy->getElementType();
     uint64_t EltSize = DL.getTypeAllocSize(EltTy);
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
@@ -118,8 +118,8 @@
 /// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
 GlobalValue *llvm::ExtractTypeInfo(Value *V) {
   V = V->stripPointerCasts();
-  GlobalValue *GV = dyn_cast<GlobalValue>(V);
-  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);
+  auto *GV = dyn_cast<GlobalValue>(V);
+  auto *Var = dyn_cast<GlobalVariable>(V);
 
   if (Var && Var->getName() == "llvm.eh.catch.all.value") {
     assert(Var->hasInitializer() &&
@@ -239,7 +239,7 @@
   while (true) {
     // Try to look through V1; if V1 is not an instruction, it can't be looked
     // through.
-    const Instruction *I = dyn_cast<Instruction>(V);
+    const auto *I = dyn_cast<Instruction>(V);
     if (!I || I->getNumOperands() == 0) return V;
     const Value *NoopInput = nullptr;
 
@@ -294,7 +294,7 @@
           break;
         }
       }
-    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
+    } else if (const auto *IVI = dyn_cast<InsertValueInst>(V)) {
       // Value may come from either the aggregate or the scalar
       ArrayRef<unsigned> InsertLoc = IVI->getIndices();
       if (ValLoc.size() >= InsertLoc.size() &&
@@ -309,7 +309,7 @@
         // change of address.
         NoopInput = Op;
       }
-    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
+    } else if (const auto *EVI = dyn_cast<ExtractValueInst>(V)) {
       // The part we're interested in will inevitably be some sub-section of the
       // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
@@ -374,7 +374,7 @@
 /// For an aggregate type, determine whether a given index is within bounds or
 /// not.
 static bool indexReallyValid(CompositeType *T, unsigned Idx) {
-  if (ArrayType *AT = dyn_cast<ArrayType>(T))
+  if (auto *AT = dyn_cast<ArrayType>(T))
     return Idx < AT->getNumElements();
 
   return Idx < cast<StructType>(T)->getNumElements();
@@ -415,7 +415,7 @@
   ++Path.back();
   Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
   while (DeeperType->isAggregateType()) {
-    CompositeType *CT = cast<CompositeType>(DeeperType);
+    auto *CT = cast<CompositeType>(DeeperType);
     if (!indexReallyValid(CT, 0))
       return true;
 
@@ -491,7 +491,7 @@
   const Instruction *I = CS.getInstruction();
   const BasicBlock *ExitBB = I->getParent();
   const TerminatorInst *Term = ExitBB->getTerminator();
-  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
+  const auto *Ret = dyn_cast<ReturnInst>(Term);
 
   // The block must end in a return statement or unreachable.
   //
@@ -642,7 +642,7 @@
 
   // If it is a non constant variable, it needs to be uniqued across shared
   // objects.
-  if (const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV)) {
+  if (const auto *Var = dyn_cast<GlobalVariable>(GV)) {
     if (!Var->isConstant())
       return false;
   }
Index: lib/CodeGen/AsmPrinter/AsmPrinter.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -83,7 +83,7 @@
 static unsigned getGVAlignmentLog2(const GlobalValue *GV, const DataLayout &DL,
                                    unsigned InBits = 0) {
   unsigned NumBits = 0;
-  if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
+  if (const auto *GVar = dyn_cast<GlobalVariable>(GV))
     NumBits = DL.getPreferredAlignmentLog(GVar);
 
   // If InBits is specified, round it to it.
@@ -1556,7 +1556,7 @@
 void AsmPrinter::EmitLLVMUsedList(const ConstantArray *InitList) {
   // Should be an array of 'i8*'.
   for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
-    const GlobalValue *GV =
+    const auto *GV =
       dyn_cast<GlobalValue>(InitList->getOperand(i)->stripPointerCasts());
     if (GV)
       OutStreamer->EmitSymbolAttribute(getSymbol(GV), MCSA_NoDeadStrip);
@@ -1581,9 +1581,9 @@
   if (!isa<ConstantArray>(List)) return;
 
   // Sanity check the structors list.
-  const ConstantArray *InitList = dyn_cast<ConstantArray>(List);
+  const auto *InitList = dyn_cast<ConstantArray>(List);
   if (!InitList) return; // Not an array!
-  StructType *ETy = dyn_cast<StructType>(InitList->getType()->getElementType());
+  auto *ETy = dyn_cast<StructType>(InitList->getType()->getElementType());
   // FIXME: Only allow the 3-field form in LLVM 4.0.
   if (!ETy || ETy->getNumElements() < 2 || ETy->getNumElements() > 3)
     return; // Not an array of two or three elements!
@@ -1595,11 +1595,11 @@
   // Gather the structors in a form that's convenient for sorting by priority.
   SmallVector<Structor, 8> Structors;
   for (Value *O : InitList->operands()) {
-    ConstantStruct *CS = dyn_cast<ConstantStruct>(O);
+    auto *CS = dyn_cast<ConstantStruct>(O);
     if (!CS) continue; // Malformed.
     if (CS->getOperand(1)->isNullValue())
       break;  // Found a null terminator, skip the rest.
-    ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
+    auto *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
     if (!Priority) continue; // Malformed.
     Structors.push_back(Structor());
     Structor &S = Structors.back();
@@ -1645,7 +1645,7 @@
       const MDNode *N = NMD->getOperand(i);
       assert(N->getNumOperands() == 1 &&
              "llvm.ident metadata entry can have only one operand");
-      const MDString *S = cast<MDString>(N->getOperand(0));
+      const auto *S = cast<MDString>(N->getOperand(0));
       OutStreamer->EmitIdent(S->getString());
     }
   }
@@ -1734,16 +1734,16 @@
   if (CV->isNullValue() || isa<UndefValue>(CV))
     return MCConstantExpr::create(0, Ctx);
 
-  if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV))
+  if (const auto *CI = dyn_cast<ConstantInt>(CV))
     return MCConstantExpr::create(CI->getZExtValue(), Ctx);
 
-  if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV))
+  if (const auto *GV = dyn_cast<GlobalValue>(CV))
     return MCSymbolRefExpr::create(getSymbol(GV), Ctx);
 
-  if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV))
+  if (const auto *BA = dyn_cast<BlockAddress>(CV))
     return MCSymbolRefExpr::create(GetBlockAddressSymbol(BA), Ctx);
 
-  const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV);
+  const auto *CE = dyn_cast<ConstantExpr>(CV);
   if (!CE) {
     llvm_unreachable("Unknown constant value to lower!");
   }
@@ -1900,7 +1900,7 @@
 /// composed of a repeated sequence of identical bytes and return the
 /// byte value.  If it is not a repeated sequence, return -1.
 static int isRepeatedByteSequence(const Value *V, const DataLayout &DL) {
-  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
     uint64_t Size = DL.getTypeAllocSizeInBits(V->getType());
     assert(Size % 8 == 0);
 
@@ -1911,7 +1911,7 @@
     return Value.zextOrTrunc(8).getZExtValue();
   }
 
-  if (const ConstantArray *CA = dyn_cast<ConstantArray>(V)) {
+  if (const auto *CA = dyn_cast<ConstantArray>(V)) {
    // Make sure all array elements are sequences of the same repeated
    // byte.
    assert(CA->getNumOperands() != 0 && "Should be a CAZ");
@@ -1927,7 +1927,7 @@
     return Byte;
   }
 
-  if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V))
+  if (const auto *CDS = dyn_cast<ConstantDataSequential>(V))
     return isRepeatedByteSequence(CDS);
 
   return -1;
@@ -2175,7 +2175,7 @@
   if (!AP.GlobalGOTEquivs.count(GOTEquivSym))
     return;
 
-  const GlobalValue *BaseGV = dyn_cast_or_null<GlobalValue>(BaseCst);
+  const auto *BaseGV = dyn_cast_or_null<GlobalValue>(BaseCst);
   if (!BaseGV)
     return;
 
@@ -2219,7 +2219,7 @@
   AsmPrinter::GOTEquivUsePair Result = AP.GlobalGOTEquivs[GOTEquivSym];
   const GlobalVariable *GV = Result.first;
   int NumUses = (int)Result.second;
-  const GlobalValue *FinalGV = dyn_cast<GlobalValue>(GV->getOperand(0));
+  const auto *FinalGV = dyn_cast<GlobalValue>(GV->getOperand(0));
   const MCSymbol *FinalSym = AP.getSymbol(FinalGV);
   *ME = AP.getObjFileLowering().getIndirectSymViaGOTPCRel(
       FinalSym, MV, Offset, AP.MMI, *AP.OutStreamer);
@@ -2244,7 +2244,7 @@
   if (isa<ConstantAggregateZero>(CV) || isa<UndefValue>(CV))
     return AP.OutStreamer->EmitZeros(Size);
 
-  if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
+  if (const auto *CI = dyn_cast<ConstantInt>(CV)) {
     switch (Size) {
     case 1:
     case 2:
@@ -2261,7 +2261,7 @@
     }
   }
 
-  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV))
+  if (const auto *CFP = dyn_cast<ConstantFP>(CV))
     return emitGlobalConstantFP(CFP, AP);
 
   if (isa<ConstantPointerNull>(CV)) {
@@ -2269,16 +2269,16 @@
     return;
   }
 
-  if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(CV))
+  if (const auto *CDS = dyn_cast<ConstantDataSequential>(CV))
     return emitGlobalConstantDataSequential(DL, CDS, AP);
 
-  if (const ConstantArray *CVA = dyn_cast<ConstantArray>(CV))
+  if (const auto *CVA = dyn_cast<ConstantArray>(CV))
     return emitGlobalConstantArray(DL, CVA, AP, BaseCV, Offset);
 
-  if (const ConstantStruct *CVS = dyn_cast<ConstantStruct>(CV))
+  if (const auto *CVS = dyn_cast<ConstantStruct>(CV))
     return emitGlobalConstantStruct(DL, CVS, AP, BaseCV, Offset);
 
-  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
+  if (const auto *CE = dyn_cast<ConstantExpr>(CV)) {
     // Look through bitcasts, which might not be able to be MCExpr'ized (e.g. of
     // vectors).
     if (CE->getOpcode() == Instruction::BitCast)
@@ -2294,7 +2294,7 @@
     }
   }
 
-  if (const ConstantVector *V = dyn_cast<ConstantVector>(CV))
+  if (const auto *V = dyn_cast<ConstantVector>(CV))
     return emitGlobalConstantVector(DL, V, AP);
 
   // Otherwise, it must be a ConstantExpr.  Lower it to an MCExpr, then emit it
Index: lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
+++ lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
@@ -728,7 +728,7 @@
       GVSym ? dyn_cast<MCSectionCOFF>(&GVSym->getSection()) : nullptr;
   const MCSymbol *KeySym = GVSec ? GVSec->getCOMDATSymbol() : nullptr;
 
-  MCSectionCOFF *DebugSec = cast<MCSectionCOFF>(
+  auto *DebugSec = cast<MCSectionCOFF>(
       Asm->getObjFileLowering().getCOFFDebugSymbolsSection());
   DebugSec = OS.getContext().getAssociativeCOFFSection(DebugSec, KeySym);
 
@@ -1173,7 +1173,7 @@
     const DINode *Element = Elements[i];
     assert(Element->getTag() == dwarf::DW_TAG_subrange_type);
 
-    const DISubrange *Subrange = cast<DISubrange>(Element);
+    const auto *Subrange = cast<DISubrange>(Element);
     assert(Subrange->getLowerBound() == 0 &&
            "codeview doesn't support subranges with lower bounds");
     int64_t Count = Subrange->getCount();
@@ -1657,7 +1657,7 @@
     assert((DDTy->getOffsetInBits() % 8) == 0 && "Unnamed bitfield member!");
     uint64_t Offset = DDTy->getOffsetInBits();
     const DIType *Ty = DDTy->getBaseType().resolve();
-    const DICompositeType *DCTy = cast<DICompositeType>(Ty);
+    const auto *DCTy = cast<DICompositeType>(Ty);
     ClassInfo NestedInfo = collectClassInfo(DCTy);
     for (const ClassInfo::MemberInfo &IndirectField : NestedInfo.Members)
       Info.Members.push_back(
@@ -2233,7 +2233,7 @@
   NamedMDNode *CUs = MMI->getModule()->getNamedMetadata("llvm.dbg.cu");
   for (const MDNode *Node : CUs->operands()) {
     for (auto *Ty : cast<DICompileUnit>(Node)->getRetainedTypes()) {
-      if (DIType *RT = dyn_cast<DIType>(Ty)) {
+      if (auto *RT = dyn_cast<DIType>(Ty)) {
         getTypeIndex(RT);
         // FIXME: Add to global/local DTU list.
       }
Index: lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
+++ lib/CodeGen/AsmPrinter/DebugHandlerBase.cpp
@@ -88,7 +88,7 @@
 uint64_t DebugHandlerBase::getBaseTypeSize(const DITypeRef TyRef) {
   DIType *Ty = TyRef.resolve();
   assert(Ty);
-  DIDerivedType *DDTy = dyn_cast<DIDerivedType>(Ty);
+  auto *DDTy = dyn_cast<DIDerivedType>(Ty);
   if (!DDTy)
     return Ty->getSizeInBits();
 
Index: lib/CodeGen/AsmPrinter/DwarfDebug.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -501,7 +501,7 @@
     for (auto *Ty : CUNode->getRetainedTypes()) {
       // The retained types array by design contains pointers to
      // MDNodes rather than DIRefs. Unique them here.
-      if (DIType *RT = dyn_cast<DIType>(Ty))
+      if (auto *RT = dyn_cast<DIType>(Ty))
         if (!RT->isExternalTypeRef())
           // There is no point in force-emitting a forward declaration.
           CU.getOrCreateTypeDIE(RT);
@@ -985,7 +985,7 @@
     // If the variable has a DIBasicType, extract it.  Basic types cannot have
     // unique identifiers, so don't bother resolving the type with the
     // identifier map.
-    const DIBasicType *BT = dyn_cast<DIBasicType>(
+    const auto *BT = dyn_cast<DIBasicType>(
         static_cast<const Metadata *>(IV.first->getType()));
 
     // Finalize the entry by lowering it into a DWARF bytestream.
Index: lib/CodeGen/AsmPrinter/DwarfUnit.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/DwarfUnit.cpp
+++ lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -1512,9 +1512,9 @@
     addUInt(StaticMemberDIE, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1,
             dwarf::DW_ACCESS_public);
 
-  if (const ConstantInt *CI = dyn_cast_or_null<ConstantInt>(DT->getConstant()))
+  if (const auto *CI = dyn_cast_or_null<ConstantInt>(DT->getConstant()))
     addConstantValue(StaticMemberDIE, CI, Ty);
-  if (const ConstantFP *CFP = dyn_cast_or_null<ConstantFP>(DT->getConstant()))
+  if (const auto *CFP = dyn_cast_or_null<ConstantFP>(DT->getConstant()))
     addConstantFPValue(StaticMemberDIE, CFP);
 
   if (uint32_t AlignInBytes = DT->getAlignInBytes())
Index: lib/CodeGen/AsmPrinter/EHStreamer.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/EHStreamer.cpp
+++ lib/CodeGen/AsmPrinter/EHStreamer.cpp
@@ -167,7 +167,7 @@
         if (!MO.isGlobal())
           continue;
 
-        const Function *F = dyn_cast<Function>(MO.getGlobal());
+        const auto *F = dyn_cast<Function>(MO.getGlobal());
         if (!F)
           continue;
         if (SawFunc) {
Index: lib/CodeGen/AsmPrinter/WinException.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/WinException.cpp
+++ lib/CodeGen/AsmPrinter/WinException.cpp
@@ -961,7 +961,7 @@
   OS.EmitValueToAlignment(4);
   OS.EmitLabel(LSDALabel);
 
-  const Function *Per =
+  const auto *Per =
       dyn_cast<Function>(F->getPersonalityFn()->stripPointerCasts());
   StringRef PerName = Per->getName();
   int BaseState = -1;
Index: lib/CodeGen/AtomicExpandPass.cpp
===================================================================
--- lib/CodeGen/AtomicExpandPass.cpp
+++ lib/CodeGen/AtomicExpandPass.cpp
@@ -1147,7 +1147,7 @@
   // against the desired one, and replace them with the CFG-derived version.
   SmallVector<ExtractValueInst *, 2> PrunedInsts;
   for (auto User : CI->users()) {
-    ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
+    auto *EV = dyn_cast<ExtractValueInst>(User);
     if (!EV)
       continue;
Index: lib/CodeGen/BuiltinGCs.cpp
===================================================================
--- lib/CodeGen/BuiltinGCs.cpp
+++ lib/CodeGen/BuiltinGCs.cpp
@@ -79,7 +79,7 @@
   }
   Optional<bool> isGCManagedPointer(const Type *Ty) const override {
     // Method is only valid on pointer typed values.
-    const PointerType *PT = cast<PointerType>(Ty);
+    const auto *PT = cast<PointerType>(Ty);
     // For the sake of this example GC, we arbitrarily pick addrspace(1) as our
     // GC managed heap.  We know that a pointer into this heap needs to be
     // updated and that no other pointer does.  Note that addrspace(1) is used
@@ -112,7 +112,7 @@
   }
   Optional<bool> isGCManagedPointer(const Type *Ty) const override {
     // Method is only valid on pointer typed values.
-    const PointerType *PT = cast<PointerType>(Ty);
+    const auto *PT = cast<PointerType>(Ty);
     // We pick addrspace(1) as our GC managed heap.
     return (1 == PT->getAddressSpace());
   }
Index: lib/CodeGen/CodeGenPrepare.cpp
===================================================================
--- lib/CodeGen/CodeGenPrepare.cpp
+++ lib/CodeGen/CodeGenPrepare.cpp
@@ -364,7 +364,7 @@
       // Don't merge if BB's address is taken.
       if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;
 
-      BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
+      auto *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
       if (Term && !Term->isConditional()) {
         Changed = true;
         DEBUG(dbgs() << "To merge:\n"<< *SinglePred << "\n\n\n");
@@ -403,7 +403,7 @@
     BasicBlock *BB = &*I++;
 
     // If this block doesn't end with an uncond branch, ignore it.
-    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
+    auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
     if (!BI || !BI->isUnconditional())
       continue;
@@ -452,18 +452,18 @@
   // the successor.  If there are more complex condition (e.g. preheaders),
   // don't mess around with them.
   BasicBlock::const_iterator BBI = BB->begin();
-  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
+  while (const auto *PN = dyn_cast<PHINode>(BBI++)) {
     for (const User *U : PN->users()) {
-      const Instruction *UI = cast<Instruction>(U);
+      const auto *UI = cast<Instruction>(U);
       if (UI->getParent() != DestBB || !isa<PHINode>(UI))
         return false;
       // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
-        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
+        if (const auto *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
-            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
+            auto *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
@@ -475,12 +475,12 @@
   // If BB and DestBB contain any common predecessors, then the phi nodes in BB
   // and DestBB may have conflicting incoming values for the block.  If so, we
   // can't merge the block.
-  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
+  const auto *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
   if (!DestBBPN) return true;  // no conflict.
 
   // Collect the preds of BB.
   SmallPtrSet<const BasicBlock*, 16> BBPreds;
-  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
+  if (const auto *BBPN = dyn_cast<PHINode>(BB->begin())) {
     // It is faster to get preds from a PHI than with pred_iterator.
     for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
       BBPreds.insert(BBPN->getIncomingBlock(i));
@@ -493,12 +493,12 @@
     BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
     if (BBPreds.count(Pred)) {   // Common predecessor?
       BBI = DestBB->begin();
-      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
+      while (const auto *PN = dyn_cast<PHINode>(BBI++)) {
         const Value *V1 = PN->getIncomingValueForBlock(Pred);
         const Value *V2 = PN->getIncomingValueForBlock(BB);
 
         // If V2 is a phi node in BB, look up what the mapped value will be.
-        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
+        if (const auto *V2PN = dyn_cast<PHINode>(V2))
           if (V2PN->getParent() == BB)
             V2 = V2PN->getIncomingValueForBlock(Pred);
 
@@ -515,7 +515,7 @@
 /// Eliminate a basic block that has only phi's and an unconditional branch in
 /// it.
 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
-  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
+  auto *BI = cast<BranchInst>(BB->getTerminator());
   BasicBlock *DestBB = BI->getSuccessor(0);
 
   DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);
@@ -547,7 +547,7 @@
 
       // Two options: either the InVal is a phi node defined in BB or it is some
       // value that dominates BB.
-      PHINode *InValPhi = dyn_cast<PHINode>(InVal);
+      auto *InValPhi = dyn_cast<PHINode>(InVal);
       if (InValPhi && InValPhi->getParent() == BB) {
         // Add all of the input values of the input PHI as inputs of this phi.
         for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
@@ -556,7 +556,7 @@
       } else {
         // Otherwise, add one instance of the dominating value for each edge that
         // we will be adding.
-        if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
+        if (auto *BBPN = dyn_cast<PHINode>(BB->begin())) {
           for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
             PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
         } else {
@@ -731,7 +731,7 @@
   SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
 
   for (auto *U : I.users())
-    if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
+    if (auto *Relocate = dyn_cast<GCRelocateInst>(U))
       // Collect all the relocate calls associated with a statepoint
       AllRelocateCalls.push_back(Relocate);
 
@@ -765,12 +765,12 @@
   for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
        UI != E; ) {
     Use &TheUse = UI.getUse();
-    Instruction *User = cast<Instruction>(*UI);
+    auto *User = cast<Instruction>(*UI);
 
     // Figure out which BB this cast is used in.  For PHI's this is the
     // appropriate predecessor block.
     BasicBlock *UserBB = User->getParent();
-    if (PHINode *PN = dyn_cast<PHINode>(User)) {
+    if (auto *PN = dyn_cast<PHINode>(User)) {
       UserBB = PN->getIncomingBlock(TheUse);
     }
 
@@ -928,7 +928,7 @@
   for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
        UI != E; ) {
     Use &TheUse = UI.getUse();
-    Instruction *User = cast<Instruction>(*UI);
+    auto *User = cast<Instruction>(*UI);
 
     // Preincrement use iterator so we don't invalidate it.
     ++UI;
@@ -1007,7 +1007,7 @@
                      const TargetLowering &TLI, const DataLayout &DL) {
   BasicBlock *UserBB = User->getParent();
   DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
-  TruncInst *TruncI = dyn_cast<TruncInst>(User);
+  auto *TruncI = dyn_cast<TruncInst>(User);
   bool MadeChange = false;
 
   for (Value::user_iterator TruncUI = TruncI->user_begin(),
@@ -1015,7 +1015,7 @@
        TruncUI != TruncE;) {
 
     Use &TruncTheUse = TruncUI.getUse();
-    Instruction *TruncUser = cast<Instruction>(*TruncUI);
+    auto *TruncUser = cast<Instruction>(*TruncUI);
     // Preincrement use iterator so we don't invalidate it.
 
     ++TruncUI;
@@ -1103,7 +1103,7 @@
   for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
        UI != E;) {
     Use &TheUse = UI.getUse();
-    Instruction *User = cast<Instruction>(*UI);
+    auto *User = cast<Instruction>(*UI);
     // Preincrement use iterator so we don't invalidate it.
     ++UI;
@@ -1211,7 +1211,7 @@
   Value *Src0 = CI->getArgOperand(3);
 
   unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
-  VectorType *VecType = dyn_cast<VectorType>(CI->getType());
+  auto *VecType = dyn_cast<VectorType>(CI->getType());
   assert(VecType && "Unexpected return type of masked load intrinsic");
 
   Type *EltTy = CI->getType()->getVectorElementType();
@@ -1357,7 +1357,7 @@
   Value *Mask = CI->getArgOperand(3);
 
   unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
-  VectorType *VecType = dyn_cast<VectorType>(Src->getType());
+  auto *VecType = dyn_cast<VectorType>(Src->getType());
   assert(VecType && "Unexpected data type in masked store intrinsic");
 
   Type *EltTy = VecType->getElementType();
@@ -1475,7 +1475,7 @@
   Value *Mask = CI->getArgOperand(2);
   Value *Src0 = CI->getArgOperand(3);
 
-  VectorType *VecType = dyn_cast<VectorType>(CI->getType());
+  auto *VecType = dyn_cast<VectorType>(CI->getType());
   assert(VecType && "Unexpected return type of masked load intrinsic");
 
@@ -1816,9 +1816,9 @@
     }
     // If this is a memcpy (or similar) then we may be able to improve the
     // alignment
-    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
+    if (auto *MI = dyn_cast<MemIntrinsic>(CI)) {
       unsigned Align = getKnownAlignment(MI->getDest(), *DL);
-      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
+      if (auto *MTI = dyn_cast<MemTransferInst>(MI))
         Align = std::min(Align, getKnownAlignment(MTI->getSource(), *DL));
       if (Align > MI->getAlignment())
         MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align));
@@ -1837,7 +1837,7 @@
     return optimizeMemoryInst(CI, Arg, Arg->getType(), AS);
   }
 
-  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
+  auto *II = dyn_cast<IntrinsicInst>(CI);
   if (II) {
     switch (II->getIntrinsicID()) {
     default: break;
@@ -1846,7 +1846,7 @@
       uint64_t Size;
       Type *ReturnTy = CI->getType();
       Constant *RetVal = nullptr;
-      ConstantInt *Op1 = cast<ConstantInt>(II->getArgOperand(1));
+      auto *Op1 = cast<ConstantInt>(II->getArgOperand(1));
       ObjSizeMode Mode = Op1->isZero() ? ObjSizeMode::Max : ObjSizeMode::Min;
       if (getObjectSize(II->getArgOperand(0),
                         Size, *DL, TLInfo, false, Mode)) {
@@ -1906,7 +1906,7 @@
     }
     case Intrinsic::aarch64_stlxr:
     case Intrinsic::aarch64_stxr: {
-      ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
+      auto *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
       if (!ExtVal || !ExtVal->hasOneUse() ||
           ExtVal->getParent() == CI->getParent())
         return false;
@@ -1992,7 +1992,7 @@
   if (!TLI)
     return false;
 
-  ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
+  auto *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
   if (!RetI)
     return false;
 
@@ -2035,7 +2035,7 @@
   SmallVector<CallInst*, 4> TailCalls;
   if (PN) {
     for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
-      CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
+      auto *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
       // Make sure the phi value is indeed produced by the tail call.
       if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
           TLI->mayBeEmittedAsTailCall(CI) &&
@@ -2055,7 +2055,7 @@
       if (RI == RE)
         continue;
 
-      CallInst *CI = dyn_cast<CallInst>(&*RI);
+      auto *CI = dyn_cast<CallInst>(&*RI);
       if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
           attributesPermitTailCall(F, CI, RetI, *TLI))
         TailCalls.push_back(CI);
@@ -2079,7 +2079,7 @@
     // Make sure the call instruction is followed by an unconditional branch to
     // the return block.
     BasicBlock *CallBB = CI->getParent();
-    BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
+    auto *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
     if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
       continue;
@@ -2335,7 +2335,7 @@
   /// \brief Remove the built instruction.
   void undo() override {
     DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
-    if (Instruction *IVal = dyn_cast<Instruction>(Val))
+    if (auto *IVal = dyn_cast<Instruction>(Val))
       IVal->eraseFromParent();
   }
 };
@@ -2360,7 +2360,7 @@
   /// \brief Remove the built instruction.
   void undo() override {
     DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
-    if (Instruction *IVal = dyn_cast<Instruction>(Val))
+    if (auto *IVal = dyn_cast<Instruction>(Val))
       IVal->eraseFromParent();
   }
 };
@@ -2385,7 +2385,7 @@
   /// \brief Remove the built instruction.
   void undo() override {
     DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
-    if (Instruction *IVal = dyn_cast<Instruction>(Val))
+    if (auto *IVal = dyn_cast<Instruction>(Val))
       IVal->eraseFromParent();
   }
 };
@@ -2435,7 +2435,7 @@
                  << "\n");
     // Record the original uses.
     for (Use &U : Inst->uses()) {
-      Instruction *UserI = cast<Instruction>(U.getUser());
+      auto *UserI = cast<Instruction>(U.getUser());
       OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
     }
     // Now, we can replace the uses.
@@ -2774,7 +2774,7 @@
 /// to be legal, as the non-promoted value would have had the same state.
 static bool isPromotedInstructionLegal(const TargetLowering &TLI,
                                        const DataLayout &DL, Value *Val) {
-  Instruction *PromotedInst = dyn_cast<Instruction>(Val);
+  auto *PromotedInst = dyn_cast<Instruction>(Val);
   if (!PromotedInst)
     return false;
   int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
@@ -2902,7 +2902,7 @@
 
   // We can get through binary operator, if it is legal. In other words, the
   // binary operator must have a nuw or nsw flag.
-  const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
+  const auto *BinOp = dyn_cast<BinaryOperator>(Inst);
   if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
       ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
        (IsSExt && BinOp->hasNoSignedWrap())))
@@ -2924,7 +2924,7 @@
   // If the operand of the truncate is not an instruction, we will not have
   // any information on the dropped bits.
   // (Actually we could for constant but it is not worth the extra logic).
-  Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
+  auto *Opnd = dyn_cast<Instruction>(OpndVal);
   if (!Opnd)
     return false;
 
@@ -2951,7 +2951,7 @@
     const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
   assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
          "Unexpected instruction type");
-  Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
+  auto *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
   Type *ExtTy = Ext->getType();
   bool IsSExt = isa<SExtInst>(Ext);
   // If the operand of the extension is not an instruction, we cannot
@@ -2986,7 +2986,7 @@
     SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
   // By construction, the operand of SExt is an instruction. Otherwise we cannot
   // get through it and this method should not be called.
-  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
+  auto *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
   Value *ExtVal = SExt;
   bool HasMergedNonFreeExt = false;
   if (isa<TruncInst>(SExtOpnd)) {
@@ -3010,7 +3010,7 @@
   TPT.eraseInstruction(SExtOpnd);
 
   // Check if the extension is still needed.
-  Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
+  auto *ExtInst = dyn_cast<Instruction>(ExtVal);
   if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
     if (ExtInst) {
       if (Exts)
@@ -3035,7 +3035,7 @@
     bool IsSExt) {
   // By construction, the operand of Ext is an instruction. Otherwise we cannot
   // get through it and this method should not be called.
-  Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
+  auto *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
   CreatedInstsCost = 0;
   if (!ExtOpnd->hasOneUse()) {
    // ExtOpnd will be promoted.
    // All its uses, but Ext, will need to use a truncated value of the
    // promoted version.
    // Create the truncate now.
     Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
-    if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
+    if (auto *ITrunc = dyn_cast<Instruction>(Trunc)) {
       ITrunc->removeFromParent();
       // Insert it just after the definition.
       ITrunc->insertAfter(ExtOpnd);
@@ -3084,7 +3084,7 @@
     }
     // Check if we can statically extend the operand.
     Value *Opnd = ExtOpnd->getOperand(OpIdx);
-    if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
+    if (const auto *Cst = dyn_cast<ConstantInt>(Opnd)) {
       DEBUG(dbgs() << "Statically extend\n");
       unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
       APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
@@ -3243,7 +3243,7 @@
   case Instruction::Mul:
   case Instruction::Shl: {
     // Can only handle X*C and X << C.
-    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
+    auto *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
     if (!RHS)
       return false;
     int64_t Scale = RHS->getSExtValue();
@@ -3268,7 +3268,7 @@
         ConstantOffset += SL->getElementOffset(Idx);
       } else {
         uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
-        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
+        if (auto *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
           ConstantOffset += CI->getSExtValue()*TypeSize;
         } else if (TypeSize) {  // Scales of zero don't do anything.
           // We only allow one variable index at the moment.
@@ -3340,7 +3340,7 @@
   }
   case Instruction::SExt:
   case Instruction::ZExt: {
-    Instruction *Ext = dyn_cast<Instruction>(AddrInst);
+    auto *Ext = dyn_cast<Instruction>(AddrInst);
     if (!Ext)
       return false;
 
@@ -3407,13 +3407,13 @@
   // fails.
   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
       TPT.getRestorationPoint();
-  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
+  if (auto *CI = dyn_cast<ConstantInt>(Addr)) {
     // Fold in immediates if legal for the target.
     AddrMode.BaseOffs += CI->getSExtValue();
     if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
       return true;
     AddrMode.BaseOffs -= CI->getSExtValue();
-  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
+  } else if (auto *GV = dyn_cast<GlobalValue>(Addr)) {
     // If this is a global variable, try to fold it into the addressing mode.
     if (!AddrMode.BaseGV) {
       AddrMode.BaseGV = GV;
@@ -3421,7 +3421,7 @@
         return true;
       AddrMode.BaseGV = nullptr;
     }
-  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
+  } else if (auto *I = dyn_cast<Instruction>(Addr)) {
     ExtAddrMode BackupAddrMode = AddrMode;
     unsigned OldSize = AddrModeInsts.size();
 
@@ -3447,7 +3447,7 @@
       AddrModeInsts.resize(OldSize);
       TPT.rollback(LastKnownGood);
     }
-  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
+  } else if (auto *CE = dyn_cast<ConstantExpr>(Addr)) {
     if (matchOperationAddr(CE, CE->getOpcode(), Depth))
       return true;
     TPT.rollback(LastKnownGood);
@@ -3527,27 +3527,27 @@
 
   // Loop over all the uses, recursively processing them.
   for (Use &U : I->uses()) {
-    Instruction *UserI = cast<Instruction>(U.getUser());
+    auto *UserI = cast<Instruction>(U.getUser());
 
-    if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
+    if (auto *LI = dyn_cast<LoadInst>(UserI)) {
       MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
       continue;
     }
 
-    if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
+    if (auto *SI = dyn_cast<StoreInst>(UserI)) {
       unsigned opNo = U.getOperandNo();
       if (opNo == 0) return true; // Storing addr, not into addr.
       MemoryUses.push_back(std::make_pair(SI, opNo));
       continue;
     }
 
-    if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
+    if (auto *CI = dyn_cast<CallInst>(UserI)) {
      // If this is a cold call, we can sink the addressing calculation into
      // the cold path.  See optimizeCallInst
       if (!OptSize && CI->hasFnAttr(Attribute::Cold))
         continue;
 
-      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
+      auto *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
       if (!IA) return true;
 
       // If this is a memory operand, we're cool, otherwise bail out.
@@ -3579,7 +3579,7 @@
   // If Val is a constant sized alloca in the entry block, it is live, this is
   // true because it is just a reference to the stack/frame pointer, which is
   // live for the whole function.
-  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
+  if (auto *AI = dyn_cast<AllocaInst>(Val))
     if (AI->isStaticAlloca())
       return true;
 
@@ -3664,7 +3664,7 @@
     // Get the access type of this use.  If the use isn't a pointer, we don't
     // know what it accesses.
     Value *Address = User->getOperand(OpNo);
-    PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
+    auto *AddrTy = dyn_cast<PointerType>(Address->getType());
     if (!AddrTy)
       return false;
     Type *AddressAccessTy = AddrTy->getElementType();
@@ -3703,7 +3703,7 @@
 /// Return true if the specified values are defined in a
 /// different basic block than BB.
 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
-  if (Instruction *I = dyn_cast<Instruction>(V))
+  if (auto *I = dyn_cast<Instruction>(V))
     return I->getParent() != BB;
   return false;
 }
@@ -3759,7 +3759,7 @@
     }
 
     // For a PHI node, push all of its incoming values.
-    if (PHINode *P = dyn_cast<PHINode>(V)) {
+    if (auto *P = dyn_cast<PHINode>(V)) {
       for (Value *IncValue : P->incoming_values())
         worklist.push_back(IncValue);
       continue;
@@ -3921,7 +3921,7 @@
         // the original IR value was tossed in favor of a constant back when
        // the AddrMode was created we need to bail out gracefully if widths
        // do not match instead of extending it.
-        Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex);
+        auto *I = dyn_cast_or_null<Instruction>(ResultIndex);
        if (I && (ResultIndex != AddrMode.BaseReg))
          I->eraseFromParent();
        return false;
@@ -3997,7 +3997,7 @@
       // the original IR value was tossed in favor of a constant back when
       // the AddrMode was created we need to bail out gracefully if widths
       // do not match instead of extending it.
-      Instruction *I = dyn_cast_or_null<Instruction>(Result);
+      auto *I = dyn_cast_or_null<Instruction>(Result);
       if (I && (Result != AddrMode.BaseReg))
         I->eraseFromParent();
       return false;
@@ -4090,11 +4090,11 @@
 /// sign extensions.
 static bool hasSameExtUse(Instruction *Inst, const TargetLowering &TLI) {
   assert(!Inst->use_empty() && "Input must have at least one use");
-  const Instruction *FirstUser = cast<Instruction>(*Inst->user_begin());
+  const auto *FirstUser = cast<Instruction>(*Inst->user_begin());
   bool IsSExt = isa<SExtInst>(FirstUser);
   Type *ExtTy = FirstUser->getType();
   for (const User *U : Inst->users()) {
-    const Instruction *UI = cast<Instruction>(U);
+    const auto *UI = cast<Instruction>(U);
     if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
       return false;
     Type *CurTy = UI->getType();
@@ -4324,7 +4324,7 @@
   bool DefIsLiveOut = false;
   for (User *U : I->users()) {
     Instruction *UI = cast<Instruction>(U);
 
     // Figure out which BB this ext is used in.
     BasicBlock *UserBB = UI->getParent();
@@ -4337,7 +4337,7 @@
   // Make sure none of the uses are PHI nodes.
   for (User *U : Src->users()) {
-    Instruction *UI = cast<Instruction>(U);
+    auto *UI = cast<Instruction>(U);
     BasicBlock *UserBB = UI->getParent();
     if (UserBB == DestBB) continue;
     // Be conservative. We don't want this xform to end up introducing
@@ -4351,7 +4351,7 @@
   bool MadeChange = false;
   for (Use &U : Src->uses()) {
-    Instruction *User = cast<Instruction>(U.getUser());
+    auto *User = cast<Instruction>(U.getUser());
 
     // Figure out which BB this ext is used in.
     BasicBlock *UserBB = User->getParent();
@@ -4593,7 +4593,7 @@
     }
   }
 
-  CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
+  auto *Cmp = dyn_cast<CmpInst>(SI->getCondition());
 
   // If a branch is predictable, an out-of-order CPU can avoid blocking on its
   // comparison condition. If the compare has more than one use, there's
@@ -4636,7 +4636,7 @@
   ASI.push_back(SI);
   for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
       It != SI->getParent()->end(); ++It) {
-    SelectInst *I = dyn_cast<SelectInst>(&*It);
+    auto *I = dyn_cast<SelectInst>(&*It);
     if (I && SI->getCondition() == I->getCondition()) {
       ASI.push_back(I);
     } else {
@@ -4819,7 +4819,7 @@
   bool MadeChange = false;
   for (User *U : SVI->users()) {
-    Instruction *UI = cast<Instruction>(U);
+    auto *UI = cast<Instruction>(U);
 
     // Figure out which BB this ext is used in.
     BasicBlock *UserBB = UI->getParent();
@@ -4984,7 +4984,7 @@
                 : -1;
     Type *PromotedType = getTransitionType();
 
-    StoreInst *ST = cast<StoreInst>(CombineInst);
+    auto *ST = cast<StoreInst>(CombineInst);
     unsigned AS = ST->getPointerAddressSpace();
     unsigned Align = ST->getAlignment();
     // Check if this store is supported.
@@ -5040,7 +5040,7 @@
     // If we cannot determine where the constant must be, we have to
     // use a splat constant.
     Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
-    if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
+    if (auto *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
       ExtractIdx = CstVal->getSExtValue();
     else
       UseSplat = true;
@@ -5232,7 +5232,7 @@
   // If the transition has more than one use, assume this is not going to be
   // beneficial.
   while (Inst->hasOneUse()) {
-    Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
+    auto *ToBePromoted = cast<Instruction>(*Inst->user_begin());
     DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
 
     if (ToBePromoted->getParent() != Parent) {
@@ -5269,7 +5269,7 @@
   if (InsertedInsts.count(I))
     return false;
 
-  if (PHINode *P = dyn_cast<PHINode>(I)) {
+  if (auto *P = dyn_cast<PHINode>(I)) {
     // It is possible for very late stage optimizations (such as SimplifyCFG)
     // to introduce PHI nodes too late to be cleaned up.  If we detect such a
     // trivial PHI, go ahead and zap it here.
@@ -5282,7 +5282,7 @@
     return false;
   }
 
-  if (CastInst *CI = dyn_cast<CastInst>(I)) {
+  if (auto *CI = dyn_cast<CastInst>(I)) {
     // If the source of the cast is a constant, then this should have
    // already been constant folded.  The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
@@ -5311,11 +5311,11 @@
     return false;
   }
 
-  if (CmpInst *CI = dyn_cast<CmpInst>(I))
+  if (auto *CI = dyn_cast<CmpInst>(I))
     if (!TLI || !TLI->hasMultipleConditionRegisters())
       return OptimizeCmpExpression(CI, TLI);
 
-  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+  if (auto *LI = dyn_cast<LoadInst>(I)) {
     stripInvariantGroupMetadata(*LI);
     if (TLI) {
       bool Modified = optimizeLoadExt(LI);
@@ -5326,7 +5326,7 @@
     return false;
   }
 
-  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
+  if (auto *SI = dyn_cast<StoreInst>(I)) {
     stripInvariantGroupMetadata(*SI);
     if (TLI) {
       unsigned AS = SI->getPointerAddressSpace();
@@ -5336,18 +5336,18 @@
     return false;
   }
 
-  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
+  auto *BinOp = dyn_cast<BinaryOperator>(I);
 
   if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                 BinOp->getOpcode() == Instruction::LShr)) {
-    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
+    auto *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
     if (TLI && CI && TLI->hasExtractBitsInsn())
       return OptimizeExtractBits(BinOp, CI, *TLI, *DL);
 
     return false;
   }
 
-  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
+  if (auto *GEPI = dyn_cast<GetElementPtrInst>(I)) {
     if (GEPI->hasAllZeroIndices()) {
       /// The GEP operand must be a pointer, so must its result -> BitCast
       Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
@@ -5361,13 +5361,13 @@
     return false;
   }
 
-  if (CallInst *CI = dyn_cast<CallInst>(I))
+  if (auto *CI = dyn_cast<CallInst>(I))
     return optimizeCallInst(CI, ModifiedDT);
 
-  if (SelectInst *SI = dyn_cast<SelectInst>(I))
+  if (auto *SI = dyn_cast<SelectInst>(I))
    return optimizeSelectInst(SI);
 
-  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
+  if (auto *SVI = dyn_cast<ShuffleVectorInst>(I))
    return optimizeShuffleVectorInst(SVI);
 
   if (auto *Switch = dyn_cast<SwitchInst>(I))
@@ -5436,7 +5436,7 @@
   Instruction *PrevNonDbgInst = nullptr;
   for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
     Instruction *Insn = &*BI++;
-    DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
+    auto *DVI = dyn_cast<DbgValueInst>(Insn);
     // Leave dbg.values that refer to an alloca alone. These
     // instrinsics describe the address of a variable (= the alloca)
     // being taken.  They should not be moved next to the alloca
@@ -5447,7 +5447,7 @@
       continue;
     }
 
-    Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
+    auto *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
     if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
       // If VI is a phi in a block with an EHPad terminator, we can't insert
       // after it.
@@ -5484,19 +5484,19 @@
     //   %andVal = and %val, #single-bit-set
     //   %icmpVal = icmp %andResult, 0
    //   br i1 %cmpVal label %dest1, label %dest2"
-    BranchInst *Brcc = dyn_cast<BranchInst>(BB.getTerminator());
+    auto *Brcc = dyn_cast<BranchInst>(BB.getTerminator());
     if (!Brcc || !Brcc->isConditional())
       continue;
-    ICmpInst *Cmp = dyn_cast<ICmpInst>(Brcc->getOperand(0));
+    auto *Cmp = dyn_cast<ICmpInst>(Brcc->getOperand(0));
     if (!Cmp || Cmp->getParent() != &BB)
       continue;
-    ConstantInt *Zero = dyn_cast<ConstantInt>(Cmp->getOperand(1));
+    auto *Zero = dyn_cast<ConstantInt>(Cmp->getOperand(1));
     if (!Zero || !Zero->isZero())
      continue;
-    Instruction *And = dyn_cast<Instruction>(Cmp->getOperand(0));
+    auto *And = dyn_cast<Instruction>(Cmp->getOperand(0));
     if (!And || And->getOpcode() != Instruction::And || And->getParent() != &BB)
       continue;
-    ConstantInt* Mask = dyn_cast<ConstantInt>(And->getOperand(1));
+    auto *Mask = dyn_cast<ConstantInt>(And->getOperand(1));
     if (!Mask || !Mask->getUniqueInteger().isPowerOf2())
       continue;
     DEBUG(dbgs() << "found and; icmp ?,0; brcc\n"); DEBUG(BB.dump());
@@ -5506,7 +5506,7 @@
     // track of which BBs we insert into.
     for (Use &TheUse : Cmp->uses()) {
       // Find brcc use.
Index: lib/CodeGen/DwarfEHPrepare.cpp =================================================================== --- lib/CodeGen/DwarfEHPrepare.cpp +++ lib/CodeGen/DwarfEHPrepare.cpp @@ -100,7 +100,7 @@ Value *DwarfEHPrepare::GetExceptionObject(ResumeInst *RI) { Value *V = RI->getOperand(0); Value *ExnObj = nullptr; - InsertValueInst *SelIVI = dyn_cast<InsertValueInst>(V); + auto *SelIVI = dyn_cast<InsertValueInst>(V); LoadInst *SelLoad = nullptr; InsertValueInst *ExcIVI = nullptr; bool EraseIVIs = false; Index: lib/CodeGen/GCRootLowering.cpp =================================================================== --- lib/CodeGen/GCRootLowering.cpp +++ lib/CodeGen/GCRootLowering.cpp @@ -140,7 +140,7 @@ return false; // llvm.gcroot is safe because it doesn't do anything at runtime. - if (CallInst *CI = dyn_cast<CallInst>(I)) + if (auto *CI = dyn_cast<CallInst>(I)) if (Function *F = CI->getCalledFunction()) if (Intrinsic::ID IID = F->getIntrinsicID()) if (IID == Intrinsic::gcroot) @@ -159,8 +159,8 @@ // Search for initializers in the initial BB. SmallPtrSet<AllocaInst *, 16> InitedRoots; for (; !CouldBecomeSafePoint(&*IP); ++IP) - if (StoreInst *SI = dyn_cast<StoreInst>(IP)) - if (AllocaInst *AI = + if (auto *SI = dyn_cast<StoreInst>(IP)) + if (auto *AI = dyn_cast<AllocaInst>(SI->getOperand(1)->stripPointerCasts())) InitedRoots.insert(AI); @@ -207,7 +207,7 @@ bool MadeChange = false; for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) { for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) { - if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++)) { + if (auto *CI = dyn_cast<IntrinsicInst>(II++)) { Function *F = CI->getCalledFunction(); switch (F->getIntrinsicID()) { case Intrinsic::gcwrite:
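A side note on the two cast flavors in these hunks: cast<> asserts that the operand really is the named type and never returns null, while dyn_cast<> returns nullptr on mismatch, which is why it can carry the whole if-condition. A hedged sketch, illustrative only, assuming LLVM headers; both helper names are invented:

    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // dyn_cast<>: the kind is unknown here, so the result is tested.
    static unsigned storeAddrSpaceIfStore(Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return SI->getPointerAddressSpace();
      return 0; // arbitrary default for the example
    }

    // cast<>: the caller already established the kind; asserts otherwise.
    static unsigned storeAddrSpace(Instruction *I) {
      auto *SI = cast<StoreInst>(I);
      return SI->getPointerAddressSpace();
    }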
Index: lib/CodeGen/GlobalMerge.cpp =================================================================== --- lib/CodeGen/GlobalMerge.cpp +++ lib/CodeGen/GlobalMerge.cpp @@ -289,7 +289,7 @@ // This Use might be a ConstantExpr. We're interested in Instruction // users, so look through ConstantExpr... Use *UI, *UE; - if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) { + if (auto *CE = dyn_cast<ConstantExpr>(U.getUser())) { if (CE->use_empty()) continue; UI = &*CE->use_begin(); @@ -304,7 +304,7 @@ // ...to iterate on all the instruction users of the global. // Note that we iterate on Uses and not on Users to be able to getNext(). for (; UI != UE; UI = UI->getNext()) { - Instruction *I = dyn_cast<Instruction>(UI->getUser()); + auto *I = dyn_cast<Instruction>(UI->getUser()); if (!I) continue; @@ -514,10 +514,10 @@ if (!GV || !GV->hasInitializer()) return; // Should be an array of 'i8*'. - const ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer()); + const auto *InitList = cast<ConstantArray>(GV->getInitializer()); for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) - if (const GlobalVariable *G = + if (const auto *G = dyn_cast<GlobalVariable>(InitList->getOperand(i)->stripPointerCasts())) MustKeepGlobalVariables.insert(G); } @@ -533,7 +533,7 @@ // Keep globals used by landingpads and catchpads. for (const Use &U : Pad->operands()) { - if (const GlobalVariable *GV = + if (const auto *GV = dyn_cast<GlobalVariable>(U->stripPointerCasts())) MustKeepGlobalVariables.insert(GV); } @@ -563,7 +563,7 @@ !GV.hasInternalLinkage()) continue; - PointerType *PT = dyn_cast<PointerType>(GV.getType()); + auto *PT = dyn_cast<PointerType>(GV.getType()); assert(PT && "Global variable is not a pointer!"); unsigned AddressSpace = PT->getAddressSpace(); Index: lib/CodeGen/InterleavedAccessPass.cpp =================================================================== --- lib/CodeGen/InterleavedAccessPass.cpp +++ lib/CodeGen/InterleavedAccessPass.cpp @@ -218,7 +218,7 @@ Extracts.push_back(Extract); continue; } - ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(*UI); + auto *SVI = dyn_cast<ShuffleVectorInst>(*UI); if (!SVI || !isa<UndefValue>(SVI->getOperand(1))) return false; @@ -344,7 +344,7 @@ if (!SI->isSimple()) return false; - ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(SI->getValueOperand()); + auto *SVI = dyn_cast<ShuffleVectorInst>(SI->getValueOperand()); if (!SVI || !SVI->hasOneUse()) return false; @@ -380,10 +380,10 @@ bool Changed = false; for (auto &I : instructions(F)) { - if (LoadInst *LI = dyn_cast<LoadInst>(&I)) + if (auto *LI = dyn_cast<LoadInst>(&I)) Changed |= lowerInterleavedLoad(LI, DeadInsts); - if (StoreInst *SI = dyn_cast<StoreInst>(&I)) + if (auto *SI = dyn_cast<StoreInst>(&I)) Changed |= lowerInterleavedStore(SI, DeadInsts); } Index: lib/CodeGen/IntrinsicLowering.cpp =================================================================== --- lib/CodeGen/IntrinsicLowering.cpp +++ lib/CodeGen/IntrinsicLowering.cpp @@ -597,7 +597,7 @@ !CI->getType()->isIntegerTy()) return false; - IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); + auto *Ty = dyn_cast<IntegerType>(CI->getType()); if (!Ty) return false; Index: lib/CodeGen/LowerEmuTLS.cpp =================================================================== --- lib/CodeGen/LowerEmuTLS.cpp +++ lib/CodeGen/LowerEmuTLS.cpp @@ -96,7 +96,7 @@ const Constant *InitValue = nullptr; if (GV->hasInitializer()) { InitValue = GV->getInitializer(); - const ConstantInt *InitIntValue = dyn_cast<ConstantInt>(InitValue); + const auto *InitIntValue = dyn_cast<ConstantInt>(InitValue); // When GV's init value is all 0, omit the EmuTlsTmplVar and let // the emutls library function reset newly allocated TLS variables. if (isa<ConstantAggregateZero>(InitValue) || Index: lib/CodeGen/MIRParser/MIRParser.cpp =================================================================== --- lib/CodeGen/MIRParser/MIRParser.cpp +++ lib/CodeGen/MIRParser/MIRParser.cpp @@ -276,7 +276,7 @@ void MIRParserImpl::createDummyFunction(StringRef Name, Module &M) { auto &Context = M.getContext(); - Function *F = cast<Function>(M.getOrInsertFunction( + auto *F = cast<Function>(M.getOrInsertFunction( Name, FunctionType::get(Type::getVoidTy(Context), false))); BasicBlock *BB = BasicBlock::Create(Context, "entry", F); new UnreachableInst(Context, BB); @@ -695,7 +695,7 @@ const auto &M = *MF.getFunction()->getParent(); SMDiagnostic Error; for (const auto &YamlConstant : YamlMF.Constants) { - const Constant *Value = dyn_cast_or_null<Constant>( + const auto *Value = dyn_cast_or_null<Constant>( parseConstantValue(YamlConstant.Value.Value, Error, M)); if (!Value) return error(Error, YamlConstant.Value.SourceRange);
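Note the const variants above: with a pointer-to-const source, dyn_cast<> yields a pointer-to-const result, so the rewrite keeps the qualifier visible as const auto * rather than bare auto *. A small sketch, illustrative only, assuming LLVM headers:

    #include "llvm/IR/Argument.h"
    #include "llvm/Support/Casting.h"
    using namespace llvm;

    static bool isByValArg(const Value *V) {
      // A has type const Argument *; auto never drops the const.
      if (const auto *A = dyn_cast<Argument>(V))
        return A->hasByValAttr();
      return false;
    }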
Index: lib/CodeGen/MachineFunction.cpp =================================================================== --- lib/CodeGen/MachineFunction.cpp +++ lib/CodeGen/MachineFunction.cpp @@ -742,7 +742,7 @@ dyn_cast<GlobalValue>(Val->stripPointerCasts())); } else { // Add filters in a list. - Constant *CVal = cast<Constant>(Val); + auto *CVal = cast<Constant>(Val); SmallVector<const GlobalValue *, 4> FilterList; for (User::op_iterator II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II) Index: lib/CodeGen/MachineLICM.cpp =================================================================== --- lib/CodeGen/MachineLICM.cpp +++ lib/CodeGen/MachineLICM.cpp @@ -336,7 +336,7 @@ for (const MachineMemOperand *MemOp : MI->memoperands()) { if (!MemOp->isStore() || !MemOp->getPseudoValue()) continue; - if (const FixedStackPseudoSourceValue *Value = + if (const auto *Value = dyn_cast<FixedStackPseudoSourceValue>(MemOp->getPseudoValue())) { if (Value->getFrameIndex() == FI) return true; Index: lib/CodeGen/MachineModuleInfo.cpp =================================================================== --- lib/CodeGen/MachineModuleInfo.cpp +++ lib/CodeGen/MachineModuleInfo.cpp @@ -320,7 +320,7 @@ void llvm::computeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo &MMI) { - FunctionType *FT = + auto *FT = cast<FunctionType>(I.getCalledValue()->getType()->getContainedType(0)); if (FT->isVarArg() && !MMI.usesVAFloatArgument()) { for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) { Index: lib/CodeGen/MachineRegisterInfo.cpp =================================================================== --- lib/CodeGen/MachineRegisterInfo.cpp +++ lib/CodeGen/MachineRegisterInfo.cpp @@ -492,7 +492,7 @@ for (const MachineOperand &MO : MI.operands()) { if (!MO.isGlobal()) continue; - const Function *Func = dyn_cast<Function>(MO.getGlobal()); + const auto *Func = dyn_cast<Function>(MO.getGlobal()); if (Func != nullptr) return Func; } Index: lib/CodeGen/MachineVerifier.cpp =================================================================== --- lib/CodeGen/MachineVerifier.cpp +++ lib/CodeGen/MachineVerifier.cpp @@ -1125,7 +1125,7 @@ for (auto *MMO : MI->memoperands()) { const PseudoSourceValue *PSV = MMO->getPseudoValue(); if (PSV == nullptr) continue; - const FixedStackPseudoSourceValue *Value = + const auto *Value = dyn_cast<FixedStackPseudoSourceValue>(PSV); if (Value == nullptr) continue; if (Value->getFrameIndex() != FI) continue; Index: lib/CodeGen/SafeStack.cpp =================================================================== --- lib/CodeGen/SafeStack.cpp +++ lib/CodeGen/SafeStack.cpp @@ -296,13 +296,13 @@ case Instruction::Invoke: { ImmutableCallSite CS(I); - if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { + if (const auto *II = dyn_cast<IntrinsicInst>(I)) { if (II->getIntrinsicID() == Intrinsic::lifetime_start || II->getIntrinsicID() == Intrinsic::lifetime_end) continue; } - if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) { + if (const auto *MI = dyn_cast<MemIntrinsic>(I)) { if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) { DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr << "\n unsafe memintrinsic: " << *I @@ -573,7 +573,7 @@ std::string Name = std::string(AI->getName()) + ".unsafe"; while (!AI->use_empty()) { Use &U = *AI->use_begin(); - Instruction *User = cast<Instruction>(U.getUser()); + auto *User = cast<Instruction>(U.getUser()); Instruction *InsertBefore; if (auto *PHI = dyn_cast<PHINode>(User))
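The MIRParser and CodeGenPrepare hunks also use dyn_cast_or_null<>, which tolerates a null input where plain dyn_cast<> would assert. A sketch, illustrative only, assuming LLVM headers; the helper name is invented:

    #include "llvm/IR/Instruction.h"
    #include "llvm/Support/Casting.h"
    using namespace llvm;

    static bool isTerminatorOrNull(Value *MaybeNull) {
      if (auto *I = dyn_cast_or_null<Instruction>(MaybeNull))
        return I->isTerminator();
      return false; // null, or not an Instruction
    }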
Index: lib/CodeGen/ScheduleDAGInstrs.cpp =================================================================== --- lib/CodeGen/ScheduleDAGInstrs.cpp +++ lib/CodeGen/ScheduleDAGInstrs.cpp @@ -105,7 +105,7 @@ /// looking through basic ptrtoint+arithmetic+inttoptr sequences. static const Value *getUnderlyingObjectFromInt(const Value *V) { do { - if (const Operator *U = dyn_cast<Operator>(V)) { + if (const auto *U = dyn_cast<Operator>(V)) { // If we find a ptrtoint, we can transfer control back to the // regular getUnderlyingObjectFromInt. if (U->getOpcode() == Instruction::PtrToInt)
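The SelectionDAG hunks that follow use the same LLVM RTTI machinery, but on SDNode subclasses, and dyn_cast<> accepts an SDValue directly. A sketch, illustrative only, assuming LLVM headers; the helper name is invented:

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    static bool isConstantTwo(SDValue V) {
      // dyn_cast looks through the SDValue to its SDNode.
      if (auto *C = dyn_cast<ConstantSDNode>(V))
        return C->getAPIntValue() == 2;
      return false;
    }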
Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp =================================================================== --- lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -692,7 +692,7 @@ Op.getOperand(0), Flags); case ISD::FSUB: // fold (fneg (fsub 0, B)) -> B - if (ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(Op.getOperand(0))) + if (auto *N0CFP = dyn_cast<ConstantFPSDNode>(Op.getOperand(0))) if (N0CFP->isZero()) return Op.getOperand(1); @@ -793,7 +793,7 @@ // integers (and undefs). // Do not permit build vector implicit truncation. static bool isConstantOrConstantVector(SDValue N, bool NoOpaques = false) { - if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N)) + if (auto *Const = dyn_cast<ConstantSDNode>(N)) return !(Const->isOpaque() && NoOpaques); if (N.getOpcode() != ISD::BUILD_VECTOR) return false; @@ -801,7 +801,7 @@ for (const SDValue &Op : N->op_values()) { if (Op.isUndef()) continue; - ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Op); + auto *Const = dyn_cast<ConstantSDNode>(Op); if (!Const || Const->getAPIntValue().getBitWidth() != BitWidth || (Const->isOpaque() && NoOpaques)) return false; @@ -988,7 +988,7 @@ Replace = false; SDLoc DL(Op); if (ISD::isUNINDEXEDLoad(Op.getNode())) { - LoadSDNode *LD = cast<LoadSDNode>(Op); + auto *LD = cast<LoadSDNode>(Op); EVT MemVT = LD->getMemoryVT(); ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD) ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, PVT, MemVT) ? ISD::ZEXTLOAD @@ -1213,7 +1213,7 @@ SDLoc DL(Op); SDNode *N = Op.getNode(); - LoadSDNode *LD = cast<LoadSDNode>(N); + auto *LD = cast<LoadSDNode>(N); EVT MemVT = LD->getMemoryVT(); ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD) ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, PVT, MemVT) ? ISD::ZEXTLOAD @@ -1658,7 +1658,7 @@ /// If \p N is a ConstantSDNode with isOpaque() == false return it cast to a /// ConstantSDNode pointer else nullptr. static ConstantSDNode *getAsNonOpaqueConstant(SDValue N) { - ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N); + auto *Const = dyn_cast<ConstantSDNode>(N); return Const != nullptr && !Const->isOpaque() ? Const : nullptr; } @@ -1808,7 +1808,7 @@ // add X, (sextinreg Y i1) -> sub X, (and Y 1) if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) { - VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1)); + auto *TN = cast<VTSDNode>(N1.getOperand(1)); if (TN->getVT() == MVT::i1) { SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0), DAG.getConstant(1, DL, VT)); @@ -1831,8 +1831,8 @@ SDLoc(N), MVT::Glue)); // canonicalize constant to RHS. - ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); - ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); + auto *N0C = dyn_cast<ConstantSDNode>(N0); + auto *N1C = dyn_cast<ConstantSDNode>(N1); if (N0C && !N1C) return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N1, N0); @@ -1866,8 +1866,8 @@ SDValue CarryIn = N->getOperand(2); // canonicalize constant to RHS - ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); - ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); + auto *N0C = dyn_cast<ConstantSDNode>(N0); + auto *N1C = dyn_cast<ConstantSDNode>(N1); if (N0C && !N1C) return DAG.getNode(ISD::ADDE, SDLoc(N), N->getVTList(), N1, N0, CarryIn); @@ -2009,7 +2009,7 @@ return N1; // If the relocation model supports it, consider symbol offsets. - if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0)) + if (auto *GA = dyn_cast<GlobalAddressSDNode>(N0)) if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) { // fold (sub Sym, c) -> Sym-c if (N1C && GA->getOpcode() == ISD::GlobalAddress) @@ -2017,7 +2017,7 @@ GA->getOffset() - (uint64_t)N1C->getSExtValue()); // fold (sub Sym+c1, Sym+c2) -> c1-c2 - if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1)) + if (auto *GB = dyn_cast<GlobalAddressSDNode>(N1)) if (GA->getGlobal() == GB->getGlobal()) return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(), DL, VT); @@ -2025,7 +2025,7 @@ // sub X, (sextinreg Y i1) -> add X, (and Y 1) if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) { - VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1)); + auto *TN = cast<VTSDNode>(N1.getOperand(1)); if (TN->getVT() == MVT::i1) { SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0), DAG.getConstant(1, DL, VT)); @@ -2728,7 +2728,7 @@ SDValue DAGCombiner::visitSMULO(SDNode *N) { // (smulo x, 2) -> (saddo x, x) - if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1))) + if (auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1))) if (C2->getAPIntValue() == 2) return DAG.getNode(ISD::SADDO, SDLoc(N), N->getVTList(), N->getOperand(0), N->getOperand(0)); @@ -2738,7 +2738,7 @@ SDValue DAGCombiner::visitUMULO(SDNode *N) { // (umulo x, 2) -> (uaddo x, x) - if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1))) + if (auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1))) if (C2->getAPIntValue() == 2) return DAG.getNode(ISD::UADDO, SDLoc(N), N->getVTList(), N->getOperand(0), N->getOperand(0)); @@ -2863,8 +2863,8 @@ // or second operand, then it might still be profitable to move the shuffle // after the xor/and/or operation. if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG) { - ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(N0); - ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(N1); + auto *SVN0 = cast<ShuffleVectorSDNode>(N0); + auto *SVN1 = cast<ShuffleVectorSDNode>(N1); assert(N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType() && "Inputs to shuffles are not the same type"); @@ -3015,14 +3015,14 @@ if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL && VT.getSizeInBits() <= 64) { - if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { + if (auto *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { APInt ADDC = ADDI->getAPIntValue(); if (!TLI.isLegalAddImmediate(ADDC.getSExtValue())) { // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal // immediate for an add, but it is legal if its top c2 bits are set, // transform the ADD so the immediate doesn't need to be materialized // in a register. - if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) { + if (auto *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) { APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), SRLI->getZExtValue()); if (DAG.MaskedValueIsZero(N0.getOperand(1), Mask)) { @@ -3046,8 +3046,8 @@ // (and (srl i64:x, K), KMask) -> // (i64 zero_extend (and (srl (i32 (trunc i64:x)), K)), KMask) if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) { - if (ConstantSDNode *CAnd = dyn_cast<ConstantSDNode>(N1)) { - if (ConstantSDNode *CShift = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { + if (auto *CAnd = dyn_cast<ConstantSDNode>(N1)) { + if (auto *CShift = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { unsigned Size = VT.getSizeInBits(); const APInt &AndMask = CAnd->getAPIntValue(); unsigned ShiftBits = CShift->getZExtValue(); @@ -3220,16 +3220,16 @@ N0.getOperand(0).getOpcode() == ISD::LOAD && N0.getOperand(0).getResNo() == 0) || (N0.getOpcode() == ISD::LOAD && N0.getResNo() == 0)) { - LoadSDNode *Load = cast<LoadSDNode>( (N0.getOpcode() == ISD::LOAD) ? + auto *Load = cast<LoadSDNode>( (N0.getOpcode() == ISD::LOAD) ?
N0 : N0.getOperand(0) ); // Get the constant (if applicable) the zero'th operand is being ANDed with. // This can be a pure constant or a vector splat, in which case we treat the // vector as a scalar and use the splat value. APInt Constant = APInt::getNullValue(1); - if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { + if (const auto *C = dyn_cast<ConstantSDNode>(N1)) { Constant = C->getAPIntValue(); - } else if (BuildVectorSDNode *Vector = dyn_cast<BuildVectorSDNode>(N1)) { + } else if (auto *Vector = dyn_cast<BuildVectorSDNode>(N1)) { APInt SplatValue, SplatUndef; unsigned SplatBitSize; bool HasAnyUndefs; @@ -3408,7 +3408,7 @@ // fold (zext_inreg (extload x)) -> (zextload x) if (ISD::isEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode())) { - LoadSDNode *LN0 = cast<LoadSDNode>(N0); + auto *LN0 = cast<LoadSDNode>(N0); EVT MemVT = LN0->getMemoryVT(); // If we zero all the possible extended bits, then we can turn this into // a zextload if we are running before legalize or the operation is legal. @@ -3428,7 +3428,7 @@ // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use if (ISD::isSEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) { - LoadSDNode *LN0 = cast<LoadSDNode>(N0); + auto *LN0 = cast<LoadSDNode>(N0); EVT MemVT = LN0->getMemoryVT(); // If we zero all the possible extended bits, then we can turn this into // a zextload if we are running before legalize or the operation is legal. @@ -3477,7 +3477,7 @@ if (N0.getOpcode() == ISD::AND) { if (!N0.getNode()->hasOneUse()) return SDValue(); - ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); + auto *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); if (!N01C || N01C->getZExtValue() != 0xFF00) return SDValue(); N0 = N0.getOperand(0); @@ -3487,7 +3487,7 @@ if (N1.getOpcode() == ISD::AND) { if (!N1.getNode()->hasOneUse()) return SDValue(); - ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); + auto *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); if (!N11C || N11C->getZExtValue() != 0xFF) return SDValue(); N1 = N1.getOperand(0); @@ -3501,8 +3501,8 @@ if (!N0.getNode()->hasOneUse() || !N1.getNode()->hasOneUse()) return SDValue(); - ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); - ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); + auto *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); + auto *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); if (!N01C || !N11C) return SDValue(); if (N01C->getZExtValue() != 8 || N11C->getZExtValue() != 8) @@ -3513,7 +3513,7 @@ if (!LookPassAnd0 && N00.getOpcode() == ISD::AND) { if (!N00.getNode()->hasOneUse()) return SDValue(); - ConstantSDNode *N001C = dyn_cast<ConstantSDNode>(N00.getOperand(1)); + auto *N001C = dyn_cast<ConstantSDNode>(N00.getOperand(1)); if (!N001C || N001C->getZExtValue() != 0xFF) return SDValue(); N00 = N00.getOperand(0); @@ -3524,7 +3524,7 @@ if (!LookPassAnd1 && N10.getOpcode() == ISD::AND) { if (!N10.getNode()->hasOneUse()) return SDValue(); - ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1)); + auto *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1)); if (!N101C || N101C->getZExtValue() != 0xFF00) return SDValue(); N10 = N10.getOperand(0); @@ -3576,7 +3576,7 @@ if (Opc != ISD::AND && Opc != ISD::SHL && Opc != ISD::SRL) return false; - ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N.getOperand(1)); + auto *N1C = dyn_cast<ConstantSDNode>(N.getOperand(1)); if (!N1C) return false; @@ -3598,7 +3598,7 @@ // (x >> 8) & 0xff0000 if (N0.getOpcode() != ISD::SRL) return false; - ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); + auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); if (!C || C->getZExtValue() != 8) return false; } else { // (x << 8) & 0xff000000 if (N0.getOpcode() != ISD::SHL) return false; - ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); + auto *C =
dyn_cast<ConstantSDNode>(N0.getOperand(1)); if (!C || C->getZExtValue() != 8) return false; } @@ -3615,7 +3615,7 @@ // (x & 0xff0000) << 8 if (Num != 0 && Num != 2) return false; - ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1)); + auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1)); if (!C || C->getZExtValue() != 8) return false; } else { // Opc == ISD::SRL @@ -3623,7 +3623,7 @@ // (x & 0xff000000) >> 8 if (Num != 1 && Num != 3) return false; - ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1)); + auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1)); if (!C || C->getZExtValue() != 8) return false; } @@ -3861,8 +3861,8 @@ if ((ZeroN00 || ZeroN01) && (ZeroN10 || ZeroN11)) { assert((!ZeroN00 || !ZeroN01) && "Both inputs zero!"); assert((!ZeroN10 || !ZeroN11) && "Both inputs zero!"); - const ShuffleVectorSDNode *SV0 = cast<ShuffleVectorSDNode>(N0); - const ShuffleVectorSDNode *SV1 = cast<ShuffleVectorSDNode>(N1); + const auto *SV0 = cast<ShuffleVectorSDNode>(N0); + const auto *SV1 = cast<ShuffleVectorSDNode>(N1); bool CanFold = true; int NumElts = VT.getVectorNumElements(); SmallVector<int, 8> Mask(NumElts); @@ -3917,7 +3917,7 @@ // fold (or c1, c2) -> c1|c2 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); - ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); + auto *N1C = dyn_cast<ConstantSDNode>(N1); if (N0C && N1C && !N1C->isOpaque()) return DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N), VT, N0C, N1C); // canonicalize constant to RHS @@ -3950,7 +3950,7 @@ // iff (c1 & c2) == 0. if (N1C && N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() && isa<ConstantSDNode>(N0.getOperand(1))) { - ConstantSDNode *C1 = cast<ConstantSDNode>(N0.getOperand(1)); + auto *C1 = cast<ConstantSDNode>(N0.getOperand(1)); if ((C1->getAPIntValue() & N1C->getAPIntValue()) != 0) { if (SDValue COR = DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N1), VT, N1C, C1)) @@ -4518,14 +4518,14 @@ if (SDValue FoldedVOp = SimplifyVBinOp(N)) return FoldedVOp; - BuildVectorSDNode *N1CV = dyn_cast<BuildVectorSDNode>(N1); + auto *N1CV = dyn_cast<BuildVectorSDNode>(N1); // If setcc produces all-one true value then: // (shl (and (setcc) N01CV) N1CV) -> (and (setcc) N01CV << N1CV) if (N1CV && N1CV->isConstant()) { if (N0.getOpcode() == ISD::AND) { SDValue N00 = N0->getOperand(0); SDValue N01 = N0->getOperand(1); - BuildVectorSDNode *N01CV = dyn_cast<BuildVectorSDNode>(N01); + auto *N01CV = dyn_cast<BuildVectorSDNode>(N01); if (N01CV && N01CV->isConstant() && N00.getOpcode() == ISD::SETCC && TLI.getBooleanContents(N00.getOperand(0).getValueType()) == @@ -5239,7 +5239,7 @@ // fold (select C, X, X) -> X if (N1 == N2) return N1; - if (const ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0)) { + if (const auto *N0C = dyn_cast<ConstantSDNode>(N0)) { // fold (select true, X, Y) -> X // fold (select false, X, Y) -> Y return !N0C->isNullValue() ?
N1 : N2; @@ -5480,7 +5480,7 @@ if (Level >= AfterLegalizeTypes) return SDValue(); - MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(N); + auto *MSC = cast<MaskedScatterSDNode>(N); SDValue Mask = MSC->getMask(); SDValue Data = MSC->getValue(); SDLoc DL(N); @@ -5541,7 +5541,7 @@ if (Level >= AfterLegalizeTypes) return SDValue(); - MaskedStoreSDNode *MST = dyn_cast<MaskedStoreSDNode>(N); + auto *MST = dyn_cast<MaskedStoreSDNode>(N); SDValue Mask = MST->getMask(); SDValue Data = MST->getValue(); EVT VT = Data.getValueType(); @@ -5613,7 +5613,7 @@ if (Level >= AfterLegalizeTypes) return SDValue(); - MaskedGatherSDNode *MGT = dyn_cast<MaskedGatherSDNode>(N); + auto *MGT = dyn_cast<MaskedGatherSDNode>(N); SDValue Mask = MGT->getMask(); SDLoc DL(N); @@ -5690,7 +5690,7 @@ if (Level >= AfterLegalizeTypes) return SDValue(); - MaskedLoadSDNode *MLD = dyn_cast<MaskedLoadSDNode>(N); + auto *MLD = dyn_cast<MaskedLoadSDNode>(N); SDValue Mask = MLD->getMask(); SDLoc DL(N); @@ -5879,7 +5879,7 @@ CC, SDLoc(N), false)) { AddToWorklist(SCC.getNode()); - if (ConstantSDNode *SCCC = dyn_cast<ConstantSDNode>(SCC.getNode())) { + if (auto *SCCC = dyn_cast<ConstantSDNode>(SCC.getNode())) { if (!SCCC->isNullValue()) return N2; // cond always true -> true val else @@ -6098,7 +6098,7 @@ if (N0->getOpcode() != ISD::LOAD) return SDValue(); - LoadSDNode *LN0 = cast<LoadSDNode>(N0); + auto *LN0 = cast<LoadSDNode>(N0); if (!ISD::isNON_EXTLoad(LN0) || !ISD::isUNINDEXEDLoad(LN0) || !N0.hasOneUse() || LN0->isVolatile() || !DstVT.isVector() || @@ -6241,7 +6241,7 @@ if (VT.isVector()) DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0)); if (DoXform) { - LoadSDNode *LN0 = cast<LoadSDNode>(N0); + auto *LN0 = cast<LoadSDNode>(N0); SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, LN0->getChain(), LN0->getBasePtr(), N0.getValueType(), @@ -6265,7 +6265,7 @@ // fold (sext ( extload x)) -> (sext (truncate (sextload x))) if ((ISD::isSEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) && ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) { - LoadSDNode *LN0 = cast<LoadSDNode>(N0); + auto *LN0 = cast<LoadSDNode>(N0); EVT MemVT = LN0->getMemoryVT(); if ((!LegalOperations && !LN0->isVolatile()) || TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, MemVT)) { @@ -6290,7 +6290,7 @@ N0.getOperand(1).getOpcode() == ISD::Constant && TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, N0.getValueType()) && (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) { - LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0)); + auto *LN0 = cast<LoadSDNode>(N0.getOperand(0)); if (LN0->getExtensionType() != ISD::ZEXTLOAD && LN0->isUnindexed()) { bool DoXform = true; SmallVector<SDNode *, 4> SetCCs; @@ -6565,7 +6565,7 @@ if (VT.isVector()) DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0)); if (DoXform) { - LoadSDNode *LN0 = cast<LoadSDNode>(N0); + auto *LN0 = cast<LoadSDNode>(N0); SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, LN0->getChain(), LN0->getBasePtr(), N0.getValueType(), @@ -6596,7 +6596,7 @@ N0.getOperand(1).getOpcode() == ISD::Constant && TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, N0.getValueType()) && (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) { - LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0)); + auto *LN0 = cast<LoadSDNode>(N0.getOperand(0)); if (LN0->getExtensionType() != ISD::SEXTLOAD && LN0->isUnindexed()) { bool DoXform = true; SmallVector<SDNode *, 4> SetCCs; @@ -6640,7 +6640,7 @@ // fold (zext ( extload x)) -> (zext (truncate (zextload x))) if ((ISD::isZEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) && ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) { - LoadSDNode *LN0 = cast<LoadSDNode>(N0); + auto *LN0 = cast<LoadSDNode>(N0); EVT MemVT = LN0->getMemoryVT(); if ((!LegalOperations && !LN0->isVolatile()) || TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT)) {
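The extload folds above and below all share one shape: a predicate such as ISD::isUNINDEXEDLoad establishes the node kind, after which cast<> is safe and auto* picks up LoadSDNode from the template argument. A trimmed sketch, illustrative only, assuming LLVM headers; the helper name is invented:

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    static bool isSimpleUnindexedLoad(SDValue N) {
      if (!ISD::isUNINDEXEDLoad(N.getNode()))
        return false;
      auto *LD = cast<LoadSDNode>(N); // kind already established above
      return !LD->isVolatile();
    }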
@@ -6805,7 +6805,7 @@ if (!N0.hasOneUse()) DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ANY_EXTEND, SetCCs, TLI); if (DoXform) { - LoadSDNode *LN0 = cast<LoadSDNode>(N0); + auto *LN0 = cast<LoadSDNode>(N0); SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT, LN0->getChain(), LN0->getBasePtr(), N0.getValueType(), @@ -6826,7 +6826,7 @@ if (N0.getOpcode() == ISD::LOAD && !ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) { - LoadSDNode *LN0 = cast<LoadSDNode>(N0); + auto *LN0 = cast<LoadSDNode>(N0); ISD::LoadExtType ExtType = LN0->getExtensionType(); EVT MemVT = LN0->getMemoryVT(); if (!LegalOperations || TLI.isLoadExtLegal(ExtType, VT, MemVT)) { @@ -6891,7 +6891,7 @@ switch (V.getOpcode()) { default: break; case ISD::Constant: { - const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode()); + const auto *CV = cast<ConstantSDNode>(V.getNode()); assert(CV && "Const value should be ConstSDNode."); const APInt &CVal = CV->getAPIntValue(); APInt NewVal = CVal & Mask; @@ -6952,7 +6952,7 @@ // Another special-case: SRL is basically zero-extending a narrower value. ExtType = ISD::ZEXTLOAD; N0 = SDValue(N, 0); - ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1)); + auto *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1)); if (!N01) return SDValue(); ExtVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits() - N01->getZExtValue()); @@ -6969,7 +6969,7 @@ unsigned ShAmt = 0; if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) { - if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { + if (auto *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { ShAmt = N01->getZExtValue(); // Is the shift amount a multiple of size of VT? if ((ShAmt & (EVTBits-1)) == 0) { @@ -7001,7 +7001,7 @@ unsigned ShLeftAmt = 0; if (ShAmt == 0 && N0.getOpcode() == ISD::SHL && N0.hasOneUse() && ExtVT == VT && TLI.isNarrowingProfitable(N0.getValueType(), VT)) { - if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { + if (auto *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { ShLeftAmt = N01->getZExtValue(); N0 = N0.getOperand(0); } @@ -7013,7 +7013,7 @@ return SDValue(); // Don't change the width of a volatile load. - LoadSDNode *LN0 = cast<LoadSDNode>(N0); + auto *LN0 = cast<LoadSDNode>(N0); if (LN0->isVolatile()) return SDValue(); @@ -7164,7 +7164,7 @@ // fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible. // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above. if (N0.getOpcode() == ISD::SRL) { - if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1))) + if (auto *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1))) if (ShAmt->getZExtValue()+EVTBits <= VTBits) { // We can turn this into an SRA iff the input to the SRL is already sign // extended enough. @@ -7181,7 +7181,7 @@ EVT == cast<LoadSDNode>(N0)->getMemoryVT() && ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, EVT))) { - LoadSDNode *LN0 = cast<LoadSDNode>(N0); + auto *LN0 = cast<LoadSDNode>(N0); SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, LN0->getChain(), LN0->getBasePtr(), EVT, @@ -7197,7 +7197,7 @@ EVT == cast<LoadSDNode>(N0)->getMemoryVT() && ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, EVT))) { - LoadSDNode *LN0 = cast<LoadSDNode>(N0); + auto *LN0 = cast<LoadSDNode>(N0); SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, LN0->getChain(), LN0->getBasePtr(), EVT, @@ -7398,7 +7398,7 @@ // Handle the case where the load remains an extending load even // after truncation.
if (N0.hasOneUse() && ISD::isUNINDEXEDLoad(N0.getNode())) { - LoadSDNode *LN0 = cast<LoadSDNode>(N0); + auto *LN0 = cast<LoadSDNode>(N0); if (!LN0->isVolatile() && LN0->getMemoryVT().getStoreSizeInBits() < VT.getSizeInBits()) { SDValue NewLoad = DAG.getExtLoad(LN0->getExtensionType(), SDLoc(LN0), @@ -7490,8 +7490,8 @@ SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) { assert(N->getOpcode() == ISD::BUILD_PAIR); - LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0)); - LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1)); + auto *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0)); + auto *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1)); if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() || LD1->getAddressSpace() != LD2->getAddressSpace()) return SDValue(); @@ -7552,7 +7552,7 @@ // Fold (bitcast int (and (bitcast fp X to int), 0x7fff...) to fp) -> fabs X // Fold (bitcast int (xor (bitcast fp X to int), 0x8000...) to fp) -> fneg X SDValue LogicOp0 = N0.getOperand(0); - ConstantSDNode *LogicOp1 = dyn_cast<ConstantSDNode>(N0.getOperand(1)); + auto *LogicOp1 = dyn_cast<ConstantSDNode>(N0.getOperand(1)); if (LogicOp1 && LogicOp1->getAPIntValue() == SignMask && LogicOp0.getOpcode() == ISD::BITCAST && LogicOp0->getOperand(0).getValueType() == VT) @@ -7608,7 +7608,7 @@ TLI.hasBigEndianPartOrdering(VT, DAG.getDataLayout()) && (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)) && TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) { - LoadSDNode *LN0 = cast<LoadSDNode>(N0); + auto *LN0 = cast<LoadSDNode>(N0); unsigned OrigAlign = LN0->getAlignment(); bool Fast = false; @@ -7766,7 +7766,7 @@ N0->getOpcode() == ISD::VECTOR_SHUFFLE && VT.getVectorNumElements() >= N0.getValueType().getVectorNumElements() && !(VT.getVectorNumElements() % N0.getValueType().getVectorNumElements())) { - ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N0); + auto *SVN = cast<ShuffleVectorSDNode>(N0); // If operands are a bitcast, peek through if it casts the original VT. // If operands are a constant, just bitcast back to original VT. @@ -8805,8 +8805,8 @@ SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); SDValue N2 = N->getOperand(2); - ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); - ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); + auto *N0CFP = dyn_cast<ConstantFPSDNode>(N0); + auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1); EVT VT = N->getValueType(0); SDLoc DL(N); const TargetOptions &Options = DAG.getTarget().Options; @@ -8914,7 +8914,7 @@ // Skip if current node is a reciprocal.
SDValue N0 = N->getOperand(0); - ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); + auto *N0CFP = dyn_cast<ConstantFPSDNode>(N0); if (N0CFP && N0CFP->isExactlyValue(1.0)) return SDValue(); @@ -8966,8 +8966,8 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) { SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); - ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); - ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); + auto *N0CFP = dyn_cast<ConstantFPSDNode>(N0); + auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1); EVT VT = N->getValueType(0); SDLoc DL(N); const TargetOptions &Options = DAG.getTarget().Options; @@ -9076,8 +9076,8 @@ SDValue DAGCombiner::visitFREM(SDNode *N) { SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); - ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); - ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); + auto *N0CFP = dyn_cast<ConstantFPSDNode>(N0); + auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1); EVT VT = N->getValueType(0); // fold (frem c1, c2) -> fmod(c1,c2) @@ -9123,8 +9123,8 @@ SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) { SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); - ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); - ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); + auto *N0CFP = dyn_cast<ConstantFPSDNode>(N0); + auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1); EVT VT = N->getValueType(0); if (N0CFP && N1CFP) // Constant fold @@ -9328,7 +9328,7 @@ SDValue DAGCombiner::visitFP_ROUND(SDNode *N) { SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); - ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); + auto *N0CFP = dyn_cast<ConstantFPSDNode>(N0); EVT VT = N->getValueType(0); // fold (fp_round c1fp) -> c1fp @@ -9382,7 +9382,7 @@ SDValue N0 = N->getOperand(0); EVT VT = N->getValueType(0); EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); - ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); + auto *N0CFP = dyn_cast<ConstantFPSDNode>(N0); // fold (fp_round_inreg c1fp) -> c1fp if (N0CFP && isTypeLegal(EVT)) { @@ -9427,7 +9427,7 @@ // fold (fpext (load x)) -> (fpext (fptrunc (extload x))) if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() && TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) { - LoadSDNode *LN0 = cast<LoadSDNode>(N0); + auto *LN0 = cast<LoadSDNode>(N0); SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT, LN0->getChain(), LN0->getBasePtr(), N0.getValueType(), @@ -9519,7 +9519,7 @@ // (fneg (fmul c, x)) -> (fmul -c, x) if (N0.getOpcode() == ISD::FMUL && (N0.getNode()->hasOneUse() || !TLI.isFNegFree(VT))) { - ConstantFPSDNode *CFP1 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1)); + auto *CFP1 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1)); if (CFP1) { APFloat CVal = CFP1->getValueAPF(); CVal.changeSign(); @@ -9772,7 +9772,7 @@ // Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB.
// SDValue DAGCombiner::visitBR_CC(SDNode *N) { - CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1)); + auto *CC = cast<CondCodeSDNode>(N->getOperand(1)); SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3); // If N is a constant we could fold this into a fallthrough or unconditional @@ -9805,12 +9805,12 @@ EVT VT; unsigned AS; - if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) { + if (auto *LD = dyn_cast<LoadSDNode>(Use)) { if (LD->isIndexed() || LD->getBasePtr().getNode() != N) return false; VT = LD->getMemoryVT(); AS = LD->getAddressSpace(); - } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) { + } else if (auto *ST = dyn_cast<StoreSDNode>(Use)) { if (ST->isIndexed() || ST->getBasePtr().getNode() != N) return false; VT = ST->getMemoryVT(); @@ -9820,7 +9820,7 @@ TargetLowering::AddrMode AM; if (N->getOpcode() == ISD::ADD) { - ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1)); + auto *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1)); if (Offset) // [reg +/- imm] AM.BaseOffs = Offset->getSExtValue(); @@ -9828,7 +9828,7 @@ // [reg +/- reg] AM.Scale = 1; } else if (N->getOpcode() == ISD::SUB) { - ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1)); + auto *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1)); if (Offset) // [reg +/- imm] AM.BaseOffs = -Offset->getSExtValue(); @@ -9854,7 +9854,7 @@ bool isLoad = true; SDValue Ptr; EVT VT; - if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { + if (auto *LD = dyn_cast<LoadSDNode>(N)) { if (LD->isIndexed()) return false; VT = LD->getMemoryVT(); if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) && !TLI.isIndexedLoadLegal(ISD::PRE_DEC, VT)) return false; Ptr = LD->getBasePtr(); - } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { + } else if (auto *ST = dyn_cast<StoreSDNode>(N)) { if (ST->isIndexed()) return false; VT = ST->getMemoryVT(); @@ -10032,7 +10032,7 @@ // Therefore, we have: // t0 = (x0 * offset0 - x1 * y0 * y1 *offset1) + (y0 * y1) * t1 - ConstantSDNode *CN = + auto *CN = cast<ConstantSDNode>(OtherUses[i]->getOperand(OffsetIdx)); int X0, X1, Y0, Y1; const APInt &Offset0 = CN->getAPIntValue(); @@ -10081,7 +10081,7 @@ bool isLoad = true; SDValue Ptr; EVT VT; - if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { + if (auto *LD = dyn_cast<LoadSDNode>(N)) { if (LD->isIndexed()) return false; VT = LD->getMemoryVT(); if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) && !TLI.isIndexedLoadLegal(ISD::POST_DEC, VT)) return false; Ptr = LD->getBasePtr(); - } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { + } else if (auto *ST = dyn_cast<StoreSDNode>(N)) { if (ST->isIndexed()) return false; VT = ST->getMemoryVT(); @@ -10204,7 +10204,7 @@ !cast<ConstantSDNode>(Inc)->isOpaque()) && "Cannot split out indexing using opaque target constants"); if (Inc.getOpcode() == ISD::TargetConstant) { - ConstantSDNode *ConstInc = cast<ConstantSDNode>(Inc); + auto *ConstInc = cast<ConstantSDNode>(Inc); Inc = DAG.getConstant(*ConstInc->getConstantIntValue(), SDLoc(Inc), ConstInc->getValueType(0)); } @@ -10215,7 +10215,7 @@ } SDValue DAGCombiner::visitLOAD(SDNode *N) { - LoadSDNode *LD = cast<LoadSDNode>(N); + auto *LD = cast<LoadSDNode>(N); SDValue Chain = LD->getChain(); SDValue Ptr = LD->getBasePtr(); @@ -10289,7 +10289,7 @@ if (OptLevel != CodeGenOpt::None && ISD::isNormalLoad(N) && !LD->isVolatile()) { if (ISD::isNON_TRUNCStore(Chain.getNode())) { - StoreSDNode *PrevST = cast<StoreSDNode>(Chain); + auto *PrevST = cast<StoreSDNode>(Chain); if (PrevST->getBasePtr() == Ptr && PrevST->getValue().getValueType() == N->getValueType(0)) return CombineTo(N, Chain.getOperand(1), Chain); @@ -10811,7 +10811,7 @@ if (Level < AfterLegalizeDAG) return false; - LoadSDNode *LD = cast<LoadSDNode>(N); + auto *LD = cast<LoadSDNode>(N); if (LD->isVolatile() || !ISD::isNormalLoad(LD) || !LD->getValueType(0).isInteger()) return false;
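The indexed-addressing hunks above share another common shape: an if/else-if chain that narrows an SDNode to either a load or a store before reading memory-specific fields. A trimmed sketch, illustrative only, assuming LLVM headers; the helper name is invented:

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    static SDValue getMemBasePtr(SDNode *N) {
      if (auto *LD = dyn_cast<LoadSDNode>(N))
        return LD->getBasePtr();
      if (auto *ST = dyn_cast<StoreSDNode>(N))
        return ST->getBasePtr();
      return SDValue(); // neither kind: empty value
    }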
@@ -10916,7 +10916,7 @@ return Result; // Check the chain and pointer. - LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0)); + auto *LD = cast<LoadSDNode>(V->getOperand(0)); if (LD->getBasePtr() != Ptr) return Result; // Not from same pointer. // The store should be chained directly to the load or be an operand of a @@ -11043,7 +11043,7 @@ /// narrowing the load and store if it would end up being a win for performance /// or code size. SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) { - StoreSDNode *ST = cast<StoreSDNode>(N); + auto *ST = cast<StoreSDNode>(N); if (ST->isVolatile()) return SDValue(); @@ -11085,7 +11085,7 @@ SDValue N0 = Value.getOperand(0); if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() && Chain == SDValue(N0.getNode(), 1)) { - LoadSDNode *LD = cast<LoadSDNode>(N0); + auto *LD = cast<LoadSDNode>(N0); if (LD->getBasePtr() != Ptr || LD->getPointerInfo().getAddrSpace() != ST->getPointerInfo().getAddrSpace()) @@ -11168,13 +11168,13 @@ /// by any other operations, then consider transforming the pair to integer /// load / store operations if the target deems the transformation profitable. SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) { - StoreSDNode *ST = cast<StoreSDNode>(N); + auto *ST = cast<StoreSDNode>(N); SDValue Chain = ST->getChain(); SDValue Value = ST->getValue(); if (ISD::isNormalStore(ST) && ISD::isNormalLoad(Value.getNode()) && Value.hasOneUse() && Chain == SDValue(Value.getNode(), 1)) { - LoadSDNode *LD = cast<LoadSDNode>(Value); + auto *LD = cast<LoadSDNode>(Value); EVT VT = LD->getMemoryVT(); if (!VT.isFloatingPoint() || VT != ST->getMemoryVT() || @@ -11253,7 +11253,7 @@ bool IsIndexSignExt = false; // Split up a folded GlobalAddress+Offset into its component parts. - if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Ptr)) + if (auto *GA = dyn_cast<GlobalAddressSDNode>(Ptr)) if (GA->getOpcode() == ISD::GlobalAddress && GA->getOffset() != 0) { return BaseIndexOffset(DAG.getGlobalAddress(GA->getGlobal(), SDLoc(GA), @@ -11402,7 +11402,7 @@ SmallVector<SDValue, 8> BuildVector; for (unsigned I = 0, E = Ty.getVectorNumElements(); I != E; ++I) { - StoreSDNode *St = cast<StoreSDNode>(Stores[I].MemNode); + auto *St = cast<StoreSDNode>(Stores[I].MemNode); Chains.push_back(St->getChain()); BuildVector.push_back(St->getValue()); } @@ -11453,7 +11453,7 @@ } else { SmallVector<SDValue, 8> Ops; for (unsigned i = 0; i < NumStores; ++i) { - StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); + auto *St = cast<StoreSDNode>(StoreNodes[i].MemNode); SDValue Val = St->getValue(); // All operands of BUILD_VECTOR / CONCAT_VECTOR must have the same type. if (Val.getValueType() != MemVT) @@ -11478,14 +11478,14 @@ bool IsLE = DAG.getDataLayout().isLittleEndian(); for (unsigned i = 0; i < NumStores; ++i) { unsigned Idx = IsLE ? (NumStores - 1 - i) : i; - StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode); + auto *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode); Chains.push_back(St->getChain()); SDValue Val = St->getValue(); StoreInt <<= ElementSizeBytes * 8; - if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) { + if (auto *C = dyn_cast<ConstantSDNode>(Val)) { StoreInt |= C->getAPIntValue().zext(SizeInBits); - } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val)) { + } else if (auto *C = dyn_cast<ConstantFPSDNode>(Val)) { StoreInt |= C->getValueAPF().bitcastToAPInt().zext(SizeInBits); } else { llvm_unreachable("Invalid constant element type"); @@ -11518,7 +11518,7 @@ for (unsigned i = 0; i < NumStores; ++i) { if (StoreNodes[i].MemNode == LatestOp) continue; - StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); + auto *St = cast<StoreSDNode>(StoreNodes[i].MemNode); // ReplaceAllUsesWith will replace all uses that existed when it was // called, but graph optimizations may cause new ones to appear.
For // example, the case in pr14333 looks like @@ -11572,7 +11572,7 @@ SDValue Chain = St->getChain(); for (auto I = Chain->use_begin(), E = Chain->use_end(); I != E; ++I) { - if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I)) { + if (auto *OtherST = dyn_cast<StoreSDNode>(*I)) { if (I.getOperandNo() != 0) continue; @@ -11632,11 +11632,11 @@ // information to check if it interferes with anything. SDNode *NextInChain = Index->getChain().getNode(); while (1) { - if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) { + if (auto *STn = dyn_cast<StoreSDNode>(NextInChain)) { // We found a store node. Use it for the next iteration. Index = STn; break; - } else if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(NextInChain)) { + } else if (auto *Ldn = dyn_cast<LoadSDNode>(NextInChain)) { if (Ldn->isVolatile()) { Index = nullptr; break; @@ -11787,12 +11787,12 @@ unsigned LastLegalVectorType = 0; bool NonZero = false; for (unsigned i = 0; i < LastConsecutiveStore + 1; ++i) { - StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); + auto *St = cast<StoreSDNode>(StoreNodes[i].MemNode); SDValue StoredVal = St->getValue(); - if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(StoredVal)) { + if (auto *C = dyn_cast<ConstantSDNode>(StoredVal)) { NonZero |= !C->isNullValue(); - } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(StoredVal)) { + } else if (auto *C = dyn_cast<ConstantFPSDNode>(StoredVal)) { NonZero |= !C->getConstantFPValue()->isNullValue(); } else { // Non-constant. @@ -11852,7 +11852,7 @@ unsigned NumStoresToMerge = 0; bool IsVec = MemVT.isVector(); for (unsigned i = 0; i < LastConsecutiveStore + 1; ++i) { - StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); + auto *St = cast<StoreSDNode>(StoreNodes[i].MemNode); unsigned StoreValOpcode = St->getValue().getOpcode(); // This restriction could be loosened. // Bail out if any stored values are not elements extracted from a vector. @@ -11892,8 +11892,8 @@ // must not be zext, volatile, indexed, and they must be consecutive. BaseIndexOffset LdBasePtr; for (unsigned i = 0; i < LastConsecutiveStore + 1; ++i) { - StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); - LoadSDNode *Ld = dyn_cast<LoadSDNode>(St->getValue()); + auto *St = cast<StoreSDNode>(StoreNodes[i].MemNode); + auto *Ld = dyn_cast<LoadSDNode>(St->getValue()); if (!Ld) break; // Loads must only have one use. @@ -11937,7 +11937,7 @@ St->getAlignment() >= RequiredAlignment) return false; - LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode); + auto *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode); unsigned FirstLoadAS = FirstLoad->getAddressSpace(); unsigned FirstLoadAlign = FirstLoad->getAlignment(); @@ -12058,7 +12058,7 @@ // Transfer chain users from old loads to the new load. for (unsigned i = 0; i < NumElem; ++i) { - LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[i].MemNode); + auto *Ld = cast<LoadSDNode>(LoadNodes[i].MemNode); DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), SDValue(NewLoad.getNode(), 1)); } @@ -12075,7 +12075,7 @@ // Remove all Store nodes. if (StoreNodes[i].MemNode == LatestOp) continue; - StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); + auto *St = cast<StoreSDNode>(StoreNodes[i].MemNode); DAG.ReplaceAllUsesOfValueWith(SDValue(St, 0), St->getChain()); deleteAndRecombine(St); } @@ -12120,7 +12120,7 @@ SDValue Chain = ST->getChain(); SDValue Ptr = ST->getBasePtr(); - const ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Value); + const auto *CFP = cast<ConstantFPSDNode>(Value); // NOTE: If the original store is volatile, this transform must not increase // the number of stores. For example, on x86-32 an f64 can be stored in one @@ -12190,7 +12190,7 @@ } SDValue DAGCombiner::visitSTORE(SDNode *N) { - StoreSDNode *ST = cast<StoreSDNode>(N); + auto *ST = cast<StoreSDNode>(N); SDValue Chain = ST->getChain(); SDValue Value = ST->getValue(); SDValue Ptr = ST->getBasePtr(); @@ -12290,7 +12290,7 @@ // If this is a load followed by a store to the same location, then the store // is dead/noop.
- if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) { + if (auto *Ld = dyn_cast<LoadSDNode>(Value)) { if (Ld->getBasePtr() == Ptr && ST->getMemoryVT() == Ld->getMemoryVT() && ST->isUnindexed() && !ST->isVolatile() && // There can't be any side effects between the load and store, such as @@ -12303,7 +12303,7 @@ // If this is a store followed by a store with the same value to the same // location, then the store is dead/noop. - if (StoreSDNode *ST1 = dyn_cast<StoreSDNode>(Chain)) { + if (auto *ST1 = dyn_cast<StoreSDNode>(Chain)) { if (ST1->getBasePtr() == Ptr && ST->getMemoryVT() == ST1->getMemoryVT() && ST1->getValue() == Value && ST->isUnindexed() && !ST->isVolatile() && ST1->isUnindexed() && !ST1->isVolatile()) { @@ -12412,7 +12412,7 @@ // Match shift amount to HalfValBitSize. unsigned HalfValBitSize = Val.getValueSizeInBits() / 2; - ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(Op1.getOperand(1)); + auto *ShAmt = dyn_cast<ConstantSDNode>(Op1.getOperand(1)); if (!ShAmt || ShAmt->getAPIntValue() != HalfValBitSize) return SDValue(); @@ -12627,7 +12627,7 @@ } SDValue EltNo = N->getOperand(1); - ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); + auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); // extract_vector_elt (build_vector x, y), 1 -> y if (ConstEltNo && @@ -12672,7 +12672,7 @@ // scalar value, then we can always use that. if (ConstEltNo && InVec.getOpcode() == ISD::VECTOR_SHUFFLE) { int NumElem = VT.getVectorNumElements(); - ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(InVec); + auto *SVOp = cast<ShuffleVectorSDNode>(InVec); // Find the new index to extract from. int OrigElt = SVOp->getMaskElt(ConstEltNo->getZExtValue()); @@ -12737,7 +12737,7 @@ ISD::isNormalLoad(InVec.getNode()) && !N->getOperand(1)->hasPredecessor(InVec.getNode())) { SDValue Index = N->getOperand(1); - if (LoadSDNode *OrigLoad = dyn_cast<LoadSDNode>(InVec)) { + if (auto *OrigLoad = dyn_cast<LoadSDNode>(InVec)) { if (!OrigLoad->isVolatile()) { return ReplaceExtractVectorEltOfLoadWithNarrowedLoad(N, VT, Index, OrigLoad); @@ -13560,7 +13560,7 @@ } unsigned IdentityIndex = i * PartNumElem; - ConstantSDNode *CS = dyn_cast<ConstantSDNode>(Op.getOperand(1)); + auto *CS = dyn_cast<ConstantSDNode>(Op.getOperand(1)); // The extract index must be constant. if (!CS) return SDValue(); @@ -13609,8 +13609,8 @@ return SDValue(); // Only handle cases where both indexes are constants with the same type. - ConstantSDNode *ExtIdx = dyn_cast<ConstantSDNode>(N->getOperand(1)); - ConstantSDNode *InsIdx = dyn_cast<ConstantSDNode>(V->getOperand(2)); + auto *ExtIdx = dyn_cast<ConstantSDNode>(N->getOperand(1)); + auto *InsIdx = dyn_cast<ConstantSDNode>(V->getOperand(2)); if (InsIdx && ExtIdx && InsIdx->getValueType(0).getSizeInBits() <= 64 && @@ -13727,7 +13727,7 @@ SDValue N0 = N->getOperand(0); SDValue N1 = N->getOperand(1); - ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); + auto *SVN = cast<ShuffleVectorSDNode>(N); SmallVector<SDValue, 4> Ops; EVT ConcatVT = N0.getOperand(0).getValueType(); @@ -13861,7 +13861,7 @@ if (N0.isUndef() && N1.isUndef()) return DAG.getUNDEF(VT); - ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); + auto *SVN = cast<ShuffleVectorSDNode>(N); // Canonicalize shuffle v, v -> v, undef if (N0 == N1) { @@ -14010,7 +14010,7 @@ int OuterScale = SVT.getSizeInBits() / ScaleSVT.getSizeInBits(); // Scale the shuffle masks to the smaller scalar type. - ShuffleVectorSDNode *InnerSVN = cast<ShuffleVectorSDNode>(BC0); + auto *InnerSVN = cast<ShuffleVectorSDNode>(BC0); SmallVector<int, 8> InnerMask = ScaleShuffleMask(InnerSVN->getMask(), InnerScale); SmallVector<int, 8> OuterMask =
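ShuffleVectorSDNode, another frequent dyn_cast target in these hunks, exposes its mask one output element at a time. A simplified sketch, illustrative only, assuming LLVM headers; the "identity" check is deliberately naive and the helper name is invented:

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    static bool isIdentityShuffle(SDNode *N) {
      auto *SVN = dyn_cast<ShuffleVectorSDNode>(N);
      if (!SVN)
        return false;
      int NumElts = SVN->getValueType(0).getVectorNumElements();
      for (int i = 0; i != NumElts; ++i)
        if (SVN->getMaskElt(i) != i && SVN->getMaskElt(i) != -1)
          return false; // -1 marks an undef lane
      return true;
    }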
@@ -14071,7 +14071,7 @@ // Only fold if this shuffle is the only user of the other shuffle. if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && N->isOnlyUserOf(N0.getNode()) && Level < AfterLegalizeDAG && TLI.isTypeLegal(VT)) { - ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0); + auto *OtherSV = cast<ShuffleVectorSDNode>(N0); // The incoming shuffle must be of the same type as the result of the // current shuffle. @@ -14182,7 +14182,7 @@ // FIXME: We could support implicit truncation if the shuffle can be // scaled to a smaller vector scalar type. - ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(EltNo); + auto *C0 = dyn_cast<ConstantSDNode>(EltNo); if (C0 && VT == InVec.getValueType() && VT.getScalarType() == InVal.getValueType()) { SmallVector<int, 8> NewMask(VT.getVectorNumElements(), -1); @@ -14383,8 +14383,8 @@ isa<ShuffleVectorSDNode>(RHS) && LHS.hasOneUse() && RHS.hasOneUse() && LHS.getOperand(1).isUndef() && RHS.getOperand(1).isUndef()) { - ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(LHS); - ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(RHS); + auto *SVN0 = cast<ShuffleVectorSDNode>(LHS); + auto *SVN1 = cast<ShuffleVectorSDNode>(RHS); if (SVN0->getMask().equals(SVN1->getMask())) { EVT VT = N->getValueType(0); @@ -14483,8 +14483,8 @@ // This triggers in things like "select bool X, 10.0, 123.0" after the FP // constants have been dropped into the constant pool. if (LHS.getOpcode() == ISD::LOAD) { - LoadSDNode *LLD = cast<LoadSDNode>(LHS); - LoadSDNode *RLD = cast<LoadSDNode>(RHS); + auto *LLD = cast<LoadSDNode>(LHS); + auto *RLD = cast<LoadSDNode>(RHS); // Token chains must be identical. if (LHS.getOperand(0) != RHS.getOperand(0) || @@ -14639,22 +14639,22 @@ if (N2 == N3) return N2; EVT VT = N2.getValueType(); - ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode()); - ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode()); + auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode()); + auto *N2C = dyn_cast<ConstantSDNode>(N2.getNode()); // Determine if the condition we're dealing with is constant SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()), N0, N1, CC, DL, false); if (SCC.getNode()) AddToWorklist(SCC.getNode()); - if (ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode())) { + if (auto *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode())) { // fold select_cc true, x, y -> x // fold select_cc false, x, y -> y return !SCCC->isNullValue() ? N2 : N3; } // Check to see if we can simplify the select into an fabs node - if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1)) { + if (auto *CFP = dyn_cast<ConstantFPSDNode>(N1)) { // Allow either -0.0 or 0.0 if (CFP->isZero()) { // select (setg[te] X, +/-0.0), X, fneg(X) -> fabs @@ -14679,8 +14679,8 @@ // types and we want the other legalization to happen first (e.g. to avoid // messing with soft float) and if the ConstantFP is not legal, because if // it is legal, we may not need to store the FP constant in a constant pool. - if (ConstantFPSDNode *TV = dyn_cast<ConstantFPSDNode>(N2)) - if (ConstantFPSDNode *FV = dyn_cast<ConstantFPSDNode>(N3)) { + if (auto *TV = dyn_cast<ConstantFPSDNode>(N2)) + if (auto *FV = dyn_cast<ConstantFPSDNode>(N3)) { if (TLI.isTypeLegal(N2.getValueType()) && (TLI.getOperationAction(ISD::ConstantFP, N2.getValueType()) != TargetLowering::Legal && @@ -14738,7 +14738,7 @@ if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND && N0->getValueType(0) == VT && isNullConstant(N1) && isNullConstant(N2)) { SDValue AndLHS = N0->getOperand(0); - ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1)); + auto *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1)); if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) { // Shift the tested bit over the sign bit. const APInt &AndMask = ConstAndRHS->getAPIntValue(); @@ -15175,7 +15175,7 @@ // If it's adding a simple constant then integrate the offset.
if (Base.getOpcode() == ISD::ADD) { - if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Base.getOperand(1))) { + if (auto *C = dyn_cast<ConstantSDNode>(Base.getOperand(1))) { Base = Base.getOperand(0); Offset += C->getZExtValue(); } @@ -15184,7 +15184,7 @@ // Return the underlying GlobalValue, and update the Offset. Return false // for GlobalAddressSDNode since the same GlobalAddress may be represented // by multiple nodes with different offsets. - if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Base)) { + if (auto *G = dyn_cast<GlobalAddressSDNode>(Base)) { GV = G->getGlobal(); Offset += G->getOffset(); return false; @@ -15193,7 +15193,7 @@ // Return the underlying Constant value, and update the Offset. Return false // for ConstantSDNodes since the same constant pool entry may be represented // by multiple nodes with different offsets. - if (ConstantPoolSDNode *C = dyn_cast<ConstantPoolSDNode>(Base)) { + if (auto *C = dyn_cast<ConstantPoolSDNode>(Base)) { CV = C->isMachineConstantPoolEntry() ? (const void *)C->getMachineCPVal() : (const void *)C->getConstVal(); Offset += C->getOffset(); @@ -15443,7 +15443,7 @@ // information to check if it interferes with anything. SDNode *NextInChain = Index->getChain().getNode(); while (true) { - if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) { + if (auto *STn = dyn_cast<StoreSDNode>(NextInChain)) { // We found a store node. Use it for the next iteration. if (STn->isVolatile() || STn->isIndexed()) { Index = nullptr; break; } ChainedStores.push_back(STn); Index = STn; break; - } else if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(NextInChain)) { + } else if (auto *Ldn = dyn_cast<LoadSDNode>(NextInChain)) { NextInChain = Ldn->getChain().getNode(); continue; } else {
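The FastISel changes that follow apply the same rewrite on the IR side, where a Value is narrowed step by step before its Instruction-specific properties are read. A sketch, illustrative only, assuming LLVM headers; the predicate is simplified and the helper name is invented:

    #include "llvm/IR/Instruction.h"
    #include "llvm/Support/Casting.h"
    using namespace llvm;

    static bool isSideEffectFreeInstruction(const Value *V) {
      const auto *I = dyn_cast<Instruction>(V);
      if (!I)
        return false; // constants and arguments are not instructions
      return !I->mayHaveSideEffects();
    }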
Index: lib/CodeGen/SelectionDAG/FastISel.cpp =================================================================== --- lib/CodeGen/SelectionDAG/FastISel.cpp +++ lib/CodeGen/SelectionDAG/FastISel.cpp @@ -136,7 +136,7 @@ bool FastISel::hasTrivialKill(const Value *V) { // Don't consider constants or arguments to have trivial kills. - const Instruction *I = dyn_cast<Instruction>(V); + const auto *I = dyn_cast<Instruction>(V); if (!I) return false; @@ -964,7 +964,7 @@ Flags.setByVal(); } if (Arg.IsByVal || Arg.IsInAlloca) { - PointerType *Ty = cast<PointerType>(Arg.Ty); + auto *Ty = cast<PointerType>(Arg.Ty); Type *ElementTy = Ty->getElementType(); unsigned FrameSize = DL.getTypeAllocSize(ElementTy); // For ByVal, alignment should come from FE. BE will guess if this info is @@ -1039,10 +1039,10 @@ } bool FastISel::selectCall(const User *I) { - const CallInst *Call = cast<CallInst>(I); + const auto *Call = cast<CallInst>(I); // Handle simple inline asms. - if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) { + if (const auto *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) { // If the inline asm has side effects, then make sure that no local value // lives across by flushing the local value map. if (IA->hasSideEffects()) @@ -1097,7 +1097,7 @@ case Intrinsic::assume: return true; case Intrinsic::dbg_declare: { - const DbgDeclareInst *DI = cast<DbgDeclareInst>(II); + const auto *DI = cast<DbgDeclareInst>(II); assert(DI->getVariable() && "Missing variable"); if (!FuncInfo.MF->getMMI().hasDebugInfo()) { DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n"); @@ -1162,7 +1162,7 @@ } case Intrinsic::dbg_value: { // This form of DBG_VALUE is target-independent. - const DbgValueInst *DI = cast<DbgValueInst>(II); + const auto *DI = cast<DbgValueInst>(II); const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE); const Value *V = DI->getValue(); assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) && @@ -1207,7 +1207,7 @@ return true; } case Intrinsic::objectsize: { - ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1)); + auto *CI = cast<ConstantInt>(II->getArgOperand(1)); unsigned long long Res = CI->isZero() ? -1ULL : 0; Constant *ResCI = ConstantInt::get(II->getType(), Res); unsigned ResultReg = getRegForValue(ResCI); @@ -1497,7 +1497,7 @@ } bool FastISel::selectExtractValue(const User *U) { - const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U); + const auto *EVI = dyn_cast<ExtractValueInst>(U); if (!EVI) return false; @@ -1582,7 +1582,7 @@ return selectGetElementPtr(I); case Instruction::Br: { - const BranchInst *BI = cast<BranchInst>(I); + const auto *BI = cast<BranchInst>(I); if (BI->isUnconditional()) { const BasicBlock *LLVMSucc = BI->getSuccessor(0);
     for (BasicBlock::const_iterator I = BB->begin();
-         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
+         const auto *PN = dyn_cast<PHINode>(I); ++I) {
       if (PN->use_empty()) continue;
 
       // Skip empty types
@@ -451,7 +451,7 @@
     return;
   }
 
-  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+  if (auto *CI = dyn_cast<ConstantInt>(V)) {
     APInt Val = CI->getValue().zextOrTrunc(BitWidth);
     DestLOI.NumSignBits = Val.getNumSignBits();
     DestLOI.KnownZero = ~Val;
@@ -486,7 +486,7 @@
     return;
   }
 
-  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+  if (auto *CI = dyn_cast<ConstantInt>(V)) {
     APInt Val = CI->getValue().zextOrTrunc(BitWidth);
     DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
     DestLOI.KnownZero &= ~Val;
Index: lib/CodeGen/SelectionDAG/InstrEmitter.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -71,7 +71,7 @@
     for (unsigned I = N; I > NumExpUses; --I) {
       if (isa<RegisterMaskSDNode>(Node->getOperand(I - 1)))
         continue;
-      if (RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Node->getOperand(I - 1)))
+      if (auto *RN = dyn_cast<RegisterSDNode>(Node->getOperand(I - 1)))
         if (TargetRegisterInfo::isPhysicalRegister(RN->getReg()))
           continue;
       NumImpUses = N - I;
@@ -389,28 +389,28 @@
   if (Op.isMachineOpcode()) {
     AddRegisterOperand(MIB, Op, IIOpNum, II, VRBaseMap,
                        IsDebug, IsClone, IsCloned);
-  } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+  } else if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
     MIB.addImm(C->getSExtValue());
-  } else if (ConstantFPSDNode *F = dyn_cast<ConstantFPSDNode>(Op)) {
+  } else if (auto *F = dyn_cast<ConstantFPSDNode>(Op)) {
     MIB.addFPImm(F->getConstantFPValue());
-  } else if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(Op)) {
+  } else if (auto *R = dyn_cast<RegisterSDNode>(Op)) {
     // Turn additional physreg operands into implicit uses on non-variadic
     // instructions. This is used by call and return instructions passing
     // arguments in registers.
     bool Imp = II && (IIOpNum >= II->getNumOperands() && !II->isVariadic());
     MIB.addReg(R->getReg(), getImplRegState(Imp));
-  } else if (RegisterMaskSDNode *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
+  } else if (auto *RM = dyn_cast<RegisterMaskSDNode>(Op)) {
     MIB.addRegMask(RM->getRegMask());
-  } else if (GlobalAddressSDNode *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
+  } else if (auto *TGA = dyn_cast<GlobalAddressSDNode>(Op)) {
     MIB.addGlobalAddress(TGA->getGlobal(), TGA->getOffset(),
                          TGA->getTargetFlags());
-  } else if (BasicBlockSDNode *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
+  } else if (auto *BBNode = dyn_cast<BasicBlockSDNode>(Op)) {
     MIB.addMBB(BBNode->getBasicBlock());
-  } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
+  } else if (auto *FI = dyn_cast<FrameIndexSDNode>(Op)) {
     MIB.addFrameIndex(FI->getIndex());
-  } else if (JumpTableSDNode *JT = dyn_cast<JumpTableSDNode>(Op)) {
+  } else if (auto *JT = dyn_cast<JumpTableSDNode>(Op)) {
     MIB.addJumpTableIndex(JT->getIndex(), JT->getTargetFlags());
-  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
+  } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
     int Offset = CP->getOffset();
     unsigned Align = CP->getAlignment();
     Type *Type = CP->getType();
@@ -430,15 +430,15 @@
     else
       Idx = MCP->getConstantPoolIndex(CP->getConstVal(), Align);
     MIB.addConstantPoolIndex(Idx, Offset, CP->getTargetFlags());
-  } else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
+  } else if (auto *ES = dyn_cast<ExternalSymbolSDNode>(Op)) {
     MIB.addExternalSymbol(ES->getSymbol(), ES->getTargetFlags());
   } else if (auto *SymNode = dyn_cast<MCSymbolSDNode>(Op)) {
     MIB.addSym(SymNode->getMCSymbol());
-  } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op)) {
+  } else if (auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
     MIB.addBlockAddress(BA->getBlockAddress(),
                         BA->getOffset(),
                         BA->getTargetFlags());
-  } else if (TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(Op)) {
+  } else if (auto *TI = dyn_cast<TargetIndexSDNode>(Op)) {
     MIB.addTargetIndex(TI->getIndex(), TI->getOffset(), TI->getTargetFlags());
   } else {
     assert(Op.getValueType() != MVT::Other &&
@@ -569,7 +569,7 @@
   // If creating a subreg_to_reg, then the first input operand
   // is an implicit value immediate, otherwise it's a register
   if (Opc == TargetOpcode::SUBREG_TO_REG) {
-    const ConstantSDNode *SD = cast<ConstantSDNode>(N0);
+    const auto *SD = cast<ConstantSDNode>(N0);
     MIB.addImm(SD->getZExtValue());
   } else
     AddOperand(MIB, N0, 0, nullptr, VRBaseMap, /*IsDebug=*/false,
@@ -627,7 +627,7 @@
   for (unsigned i = 1; i != NumOps; ++i) {
     SDValue Op = Node->getOperand(i);
     if ((i & 1) == 0) {
-      RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(i-1));
+      auto *R = dyn_cast<RegisterSDNode>(Node->getOperand(i-1));
       // Skip physical registers as they don't have a vreg to get and we'll
       // insert copies for them in TwoAddressInstructionPass anyway.
       if (!R || !TargetRegisterInfo::isPhysicalRegister(R->getReg())) {
@@ -693,12 +693,12 @@
                /*IsDebug=*/true, /*IsClone=*/false, /*IsCloned=*/false);
   } else if (SD->getKind() == SDDbgValue::CONST) {
     const Value *V = SD->getConst();
-    if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
+    if (const auto *CI = dyn_cast<ConstantInt>(V)) {
       if (CI->getBitWidth() > 64)
         MIB.addCImm(CI);
       else
         MIB.addImm(CI->getSExtValue());
-    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
+    } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
       MIB.addFPImm(CF);
     } else {
       // Could be an Undef.  In any case insert an Undef so we can see what we
@@ -869,7 +869,7 @@
   // In addition to declared implicit uses, we must also check for
   // direct RegisterSDNode operands.
   for (unsigned i = 0, e = F->getNumOperands(); i != e; ++i)
-    if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
+    if (auto *R = dyn_cast<RegisterSDNode>(F->getOperand(i))) {
       unsigned Reg = R->getReg();
       if (TargetRegisterInfo::isPhysicalRegister(Reg))
         UsedRegs.push_back(Reg);
@@ -905,7 +905,7 @@
   case ISD::CopyToReg: {
     unsigned SrcReg;
     SDValue SrcVal = Node->getOperand(2);
-    if (RegisterSDNode *R = dyn_cast<RegisterSDNode>(SrcVal))
+    if (auto *R = dyn_cast<RegisterSDNode>(SrcVal))
       SrcReg = R->getReg();
     else
       SrcReg = getVR(SrcVal, VRBaseMap);
@@ -935,7 +935,7 @@
     unsigned TarOp = (Node->getOpcode() == ISD::LIFETIME_START) ?
     TargetOpcode::LIFETIME_START : TargetOpcode::LIFETIME_END;
 
-    FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Node->getOperand(1));
+    auto *FI = dyn_cast<FrameIndexSDNode>(Node->getOperand(1));
     BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp))
     .addFrameIndex(FI->getIndex());
     break;
Index: lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -358,7 +358,7 @@
 SDValue SelectionDAGLegalize::ExpandINSERT_VECTOR_ELT(SDValue Vec, SDValue Val,
                                                       SDValue Idx,
                                                       const SDLoc &dl) {
-  if (ConstantSDNode *InsertPos = dyn_cast<ConstantSDNode>(Idx)) {
+  if (auto *InsertPos = dyn_cast<ConstantSDNode>(Idx)) {
     // SCALAR_TO_VECTOR requires that the type of the value being inserted
     // match the element type of the vector being created, except for
     // integers in which case the inserted value can be over width.
@@ -396,7 +396,7 @@
   MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags();
   AAMDNodes AAInfo = ST->getAAInfo();
   SDLoc dl(ST);
-  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) {
+  if (auto *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) {
     if (CFP->getValueType(0) == MVT::f32 &&
         TLI.isTypeLegal(MVT::i32)) {
       SDValue Con = DAG.getConstant(CFP->getValueAPF().
@@ -441,7 +441,7 @@
 }
 
 void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
-  StoreSDNode *ST = cast<StoreSDNode>(Node);
+  auto *ST = cast<StoreSDNode>(Node);
   SDValue Chain = ST->getChain();
   SDValue Ptr = ST->getBasePtr();
   SDLoc dl(Node);
@@ -610,7 +610,7 @@
 }
 
 void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
-  LoadSDNode *LD = cast<LoadSDNode>(Node);
+  auto *LD = cast<LoadSDNode>(Node);
   SDValue Chain = LD->getChain();  // The chain.
   SDValue Ptr = LD->getBasePtr();  // The base pointer.
   SDValue Value;                   // The value returned by the load op.
@@ -1187,7 +1187,7 @@
   for (SDNode::use_iterator UI = Vec.getNode()->use_begin(),
        UE = Vec.getNode()->use_end(); UI != UE; ++UI) {
     SDNode *User = *UI;
-    if (StoreSDNode *ST = dyn_cast<StoreSDNode>(User)) {
+    if (auto *ST = dyn_cast<StoreSDNode>(User)) {
       if (ST->isIndexed() || ST->isTruncatingStore() ||
           ST->getValue() != Vec)
         continue;
@@ -1657,7 +1657,7 @@
                             SrcOp.getValueType().getTypeForEVT(*DAG.getContext()));
   SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign);
 
-  FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
+  auto *StackPtrFI = cast<FrameIndexSDNode>(FIPtr);
   int SPFI = StackPtrFI->getIndex();
   MachinePointerInfo PtrInfo =
       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
@@ -1696,7 +1696,7 @@
   // then load the whole vector back out.
   SDValue StackPtr = DAG.CreateStackTemporary(Node->getValueType(0));
-  FrameIndexSDNode *StackPtrFI = cast<FrameIndexSDNode>(StackPtr);
+  auto *StackPtrFI = cast<FrameIndexSDNode>(StackPtr);
   int SPFI = StackPtrFI->getIndex();
 
   SDValue Ch = DAG.getTruncStore(
@@ -1846,10 +1846,10 @@
   if (isConstant) {
     SmallVector CV;
     for (unsigned i = 0, e = NumElems; i != e; ++i) {
-      if (ConstantFPSDNode *V =
+      if (auto *V =
          dyn_cast<ConstantFPSDNode>(Node->getOperand(i))) {
        CV.push_back(const_cast<ConstantFP *>(V->getConstantFPValue()));
-      } else if (ConstantSDNode *V =
+      } else if (auto *V =
                 dyn_cast<ConstantSDNode>(Node->getOperand(i))) {
        if (OpVT==EltVT)
          CV.push_back(const_cast<ConstantInt *>(V->getConstantIntValue()));
@@ -3241,7 +3241,7 @@
   }
     break;
   case ISD::ConstantFP: {
-    ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Node);
+    auto *CFP = cast<ConstantFPSDNode>(Node);
     // Check to see if this FP immediate is already legal.
     // If this is a legal constant, turn it into a TargetConstantFP node.
     if (!TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0)))
@@ -3249,7 +3249,7 @@
     break;
   }
   case ISD::Constant: {
-    ConstantSDNode *CP = cast<ConstantSDNode>(Node);
+    auto *CP = cast<ConstantSDNode>(Node);
     Results.push_back(ExpandConstant(CP));
     break;
   }
Index: lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -148,7 +148,7 @@
   // When LegalInHWReg, we can load better from the constant pool.
   if (isLegalInHWReg(N->getValueType(ResNo)))
     return SDValue(N, ResNo);
-  ConstantFPSDNode *CN = cast<ConstantFPSDNode>(N);
+  auto *CN = cast<ConstantFPSDNode>(N);
   // In ppcf128, the high 64 bits are always first in memory regardless
   // of Endianness. LLVM's APFloat representation is not Endian sensitive,
   // and so always converts into a 128-bit APInt in a non-Endian-sensitive
@@ -626,7 +626,7 @@
 SDValue DAGTypeLegalizer::SoftenFloatRes_LOAD(SDNode *N, unsigned ResNo) {
   bool LegalInHWReg = isLegalInHWReg(N->getValueType(ResNo));
-  LoadSDNode *L = cast<LoadSDNode>(N);
+  auto *L = cast<LoadSDNode>(N);
   EVT VT = N->getValueType(0);
   EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
   SDLoc dl(N);
@@ -960,7 +960,7 @@
 SDValue DAGTypeLegalizer::SoftenFloatOp_STORE(SDNode *N, unsigned OpNo) {
   assert(ISD::isUNINDEXEDStore(N) && "Indexed store during type legalization!");
   assert(OpNo == 1 && "Can only soften the stored value!");
-  StoreSDNode *ST = cast<StoreSDNode>(N);
+  auto *ST = cast<StoreSDNode>(N);
   SDValue Val = ST->getValue();
   SDLoc dl(N);
@@ -1377,7 +1377,7 @@
   }
 
   assert(ISD::isUNINDEXEDLoad(N) && "Indexed load during type legalization!");
-  LoadSDNode *LD = cast<LoadSDNode>(N);
+  auto *LD = cast<LoadSDNode>(N);
   SDValue Chain = LD->getChain();
   SDValue Ptr = LD->getBasePtr();
   SDLoc dl(N);
@@ -1697,7 +1697,7 @@
   assert(ISD::isUNINDEXEDStore(N) && "Indexed store during type legalization!");
   assert(OpNo == 1 && "Can only expand the stored value so far");
-  StoreSDNode *ST = cast<StoreSDNode>(N);
+  auto *ST = cast<StoreSDNode>(N);
   SDValue Chain = ST->getChain();
   SDValue Ptr = ST->getBasePtr();
@@ -1828,7 +1828,7 @@
 // Lower the promoted Float down to the integer value of same size and construct
 // a STORE of the integer value.
 SDValue DAGTypeLegalizer::PromoteFloatOp_STORE(SDNode *N, unsigned OpNo) {
-  StoreSDNode *ST = cast<StoreSDNode>(N);
+  auto *ST = cast<StoreSDNode>(N);
   SDValue Val = ST->getValue();
   SDLoc DL(N);
@@ -1928,7 +1928,7 @@
 }
 
 SDValue DAGTypeLegalizer::PromoteFloatRes_ConstantFP(SDNode *N) {
-  ConstantFPSDNode *CFPNode = cast<ConstantFPSDNode>(N);
+  auto *CFPNode = cast<ConstantFPSDNode>(N);
   EVT VT = N->getValueType(0);
   SDLoc DL(N);
@@ -2080,7 +2080,7 @@
 }
 
 SDValue DAGTypeLegalizer::PromoteFloatRes_LOAD(SDNode *N) {
-  LoadSDNode *L = cast<LoadSDNode>(N);
+  auto *L = cast<LoadSDNode>(N);
   EVT VT = N->getValueType(0);
 
   // Load the value as an integer value with the same number of bits.
Index: lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -1358,7 +1358,7 @@
     break;
   }
   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
-    AtomicSDNode *AN = cast<AtomicSDNode>(N);
+    auto *AN = cast<AtomicSDNode>(N);
     SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::Other);
     SDValue Tmp = DAG.getAtomicCmpSwap(
         ISD::ATOMIC_CMP_SWAP, SDLoc(N), AN->getMemoryVT(), VTs,
@@ -2344,7 +2344,7 @@
   // If we can emit an efficient shift operation, do so now.  Check to see if
   // the RHS is a constant.
-  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
+  if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
     return ExpandShiftByConstant(N, CN->getAPIntValue(), Lo, Hi);
 
   // If we can determine that the high bit of the shift is zero or one, even if
@@ -2810,7 +2810,7 @@
   if (CCCode == ISD::SETEQ || CCCode == ISD::SETNE) {
     if (RHSLo == RHSHi) {
-      if (ConstantSDNode *RHSCST = dyn_cast<ConstantSDNode>(RHSLo)) {
+      if (auto *RHSCST = dyn_cast<ConstantSDNode>(RHSLo)) {
         if (RHSCST->isAllOnesValue()) {
           // Equality comparison to -1.
           NewLHS = DAG.getNode(ISD::AND, dl,
@@ -2830,7 +2830,7 @@
   // If this is a comparison of the sign bit, just look at the top part.
   // X > -1,  x < 0
-  if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(NewRHS))
+  if (auto *CST = dyn_cast<ConstantSDNode>(NewRHS))
     if ((CCCode == ISD::SETLT && CST->isNullValue()) ||     // X < 0
         (CCCode == ISD::SETGT && CST->isAllOnesValue())) {  // X > -1
       NewLHS = LHSHi;
@@ -2877,8 +2877,8 @@
                              DAG.getNode(ISD::SETCC, dl,
                                          getSetCCResultType(LHSHi.getValueType()),
                                          LHSHi, RHSHi, DAG.getCondCode(CCCode));
-  ConstantSDNode *LoCmpC = dyn_cast<ConstantSDNode>(LoCmp.getNode());
-  ConstantSDNode *HiCmpC = dyn_cast<ConstantSDNode>(HiCmp.getNode());
+  auto *LoCmpC = dyn_cast<ConstantSDNode>(LoCmp.getNode());
+  auto *HiCmpC = dyn_cast<ConstantSDNode>(HiCmp.getNode());
 
   bool EqAllowed = (CCCode == ISD::SETLE || CCCode == ISD::SETGE ||
                     CCCode == ISD::SETUGE || CCCode == ISD::SETULE);
@@ -3255,7 +3255,7 @@
 SDValue DAGTypeLegalizer::PromoteIntRes_VECTOR_SHUFFLE(SDNode *N) {
-  ShuffleVectorSDNode *SV = cast<ShuffleVectorSDNode>(N);
+  auto *SV = cast<ShuffleVectorSDNode>(N);
   EVT VT = N->getValueType(0);
   SDLoc dl(N);
Index: lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -254,7 +254,7 @@
   assert(ISD::isNormalLoad(N) && "This routine only for normal loads!");
   SDLoc dl(N);
 
-  LoadSDNode *LD = cast<LoadSDNode>(N);
+  auto *LD = cast<LoadSDNode>(N);
   EVT ValueVT = LD->getValueType(0);
   EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), ValueVT);
   SDValue Chain = LD->getChain();
@@ -466,7 +466,7 @@
   assert(OpNo == 1 && "Can only expand the stored value so far");
   SDLoc dl(N);
 
-  StoreSDNode *St = cast<StoreSDNode>(N);
+  auto *St = cast<StoreSDNode>(N);
   EVT ValueVT = St->getValue().getValueType();
   EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), ValueVT);
   SDValue Chain = St->getChain();
Index: lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -201,7 +201,7 @@
   bool HasVectorValue = false;
   if (Op.getOpcode() == ISD::LOAD) {
-    LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
+    auto *LD = cast<LoadSDNode>(Op.getNode());
     ISD::LoadExtType ExtType = LD->getExtensionType();
     if (LD->getMemoryVT().isVector() && ExtType != ISD::NON_EXTLOAD)
       switch (TLI.getLoadExtAction(LD->getExtensionType(), LD->getValueType(0),
@@ -229,7 +229,7 @@
         return LegalizeOp(ExpandLoad(Op));
     }
   } else if (Op.getOpcode() == ISD::STORE) {
-    StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
+    auto *ST = cast<StoreSDNode>(Op.getNode());
     EVT StVT = ST->getMemoryVT();
     MVT ValVT = ST->getValue().getSimpleValueType();
     if (StVT.isVector() && ST->isTruncatingStore())
@@ -495,7 +495,7 @@
 SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
-  LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
+  auto *LD = cast<LoadSDNode>(Op.getNode());
 
   EVT SrcVT = LD->getMemoryVT();
   EVT SrcEltVT = SrcVT.getScalarType();
@@ -637,7 +637,7 @@
 }
 
 SDValue VectorLegalizer::ExpandStore(SDValue Op) {
-  StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
+  auto *ST = cast<StoreSDNode>(Op.getNode());
 
   EVT StVT = ST->getMemoryVT();
   EVT MemSclVT = StVT.getScalarType();
Index: lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -856,7 +856,7 @@
   // TODO: The IdxVal == 0 constraint is artificial, we could do this whenever
   // the index is constant and there is no boundary crossing. But those cases
   // don't seem to get hit in practice.
-  if (ConstantSDNode *ConstIdx = dyn_cast<ConstantSDNode>(Idx)) {
+  if (auto *ConstIdx = dyn_cast<ConstantSDNode>(Idx)) {
     unsigned IdxVal = ConstIdx->getZExtValue();
     if ((IdxVal == 0) && (IdxVal + SubElems <= VecElems / 2)) {
       EVT LoVT, HiVT;
@@ -976,7 +976,7 @@
   SDLoc dl(N);
   GetSplitVector(Vec, Lo, Hi);
 
-  if (ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
+  if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
     unsigned IdxVal = CIdx->getZExtValue();
     unsigned LoNumElts = Lo.getValueType().getVectorNumElements();
     if (IdxVal < LoNumElts)
@@ -2822,7 +2822,7 @@
 }
 
 SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) {
-  LoadSDNode *LD = cast<LoadSDNode>(N);
+  auto *LD = cast<LoadSDNode>(N);
   ISD::LoadExtType ExtType = LD->getExtensionType();
 
   SDValue Result;
@@ -3269,7 +3269,7 @@
 SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) {
   // We have to widen the value, but we want only to store the original
   // vector type.
-  StoreSDNode *ST = cast<StoreSDNode>(N);
+  auto *ST = cast<StoreSDNode>(N);
 
   SmallVector StChain;
   if (ST->isTruncatingStore())
@@ -3284,7 +3284,7 @@
 }
 
 SDValue DAGTypeLegalizer::WidenVecOp_MSTORE(SDNode *N, unsigned OpNo) {
-  MaskedStoreSDNode *MST = cast<MaskedStoreSDNode>(N);
+  auto *MST = cast<MaskedStoreSDNode>(N);
   SDValue Mask = MST->getMask();
   EVT MaskVT = Mask.getValueType();
   SDValue StVal = MST->getValue();
@@ -3321,7 +3321,7 @@
 SDValue DAGTypeLegalizer::WidenVecOp_MSCATTER(SDNode *N, unsigned OpNo) {
   assert(OpNo == 1 && "Can widen only data operand of mscatter");
-  MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(N);
+  auto *MSC = cast<MaskedScatterSDNode>(N);
   SDValue DataOp = MSC->getValue();
   SDValue Mask = MSC->getMask();
Index: lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -145,7 +145,7 @@
   SDVTList VTList = DAG->getVTList(VTs);
   MachineSDNode::mmo_iterator Begin = nullptr, End = nullptr;
-  MachineSDNode *MN = dyn_cast<MachineSDNode>(N);
+  auto *MN = dyn_cast<MachineSDNode>(N);
 
   // Store memory references.
   if (MN) {
Index: lib/CodeGen/SelectionDAG/SelectionDAG.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -135,10 +135,10 @@
     // constants are.
     SDValue NotZero = N->getOperand(i);
     unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
-    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
+    if (auto *CN = dyn_cast<ConstantSDNode>(NotZero)) {
       if (CN->getAPIntValue().countTrailingOnes() < EltSize)
         return false;
-    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
+    } else if (auto *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
       if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
         return false;
     } else
@@ -174,10 +174,10 @@
     // we care if the resultant vector is all zeros, not whether the individual
     // constants are.
     unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
-    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
+    if (auto *CN = dyn_cast<ConstantSDNode>(Op)) {
       if (CN->getAPIntValue().countTrailingZeros() < EltSize)
         return false;
-    } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
+    } else if (auto *CFPN = dyn_cast<ConstantFPSDNode>(Op)) {
       if (CFPN->getValueAPF().bitcastToAPInt().countTrailingZeros() < EltSize)
         return false;
     } else
@@ -385,7 +385,7 @@
   default: break;  // Normal nodes don't need extra info.
   case ISD::TargetConstant:
   case ISD::Constant: {
-    const ConstantSDNode *C = cast<ConstantSDNode>(N);
+    const auto *C = cast<ConstantSDNode>(N);
     ID.AddPointer(C->getConstantIntValue());
     ID.AddBoolean(C->isOpaque());
     break;
   }
@@ -399,7 +399,7 @@
   case ISD::GlobalAddress:
   case ISD::TargetGlobalTLSAddress:
   case ISD::GlobalTLSAddress: {
-    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
+    const auto *GA = cast<GlobalAddressSDNode>(N);
     ID.AddPointer(GA->getGlobal());
     ID.AddInteger(GA->getOffset());
     ID.AddInteger(GA->getTargetFlags());
@@ -428,7 +428,7 @@
     break;
   case ISD::ConstantPool:
   case ISD::TargetConstantPool: {
-    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
+    const auto *CP = cast<ConstantPoolSDNode>(N);
     ID.AddInteger(CP->getAlignment());
     ID.AddInteger(CP->getOffset());
     if (CP->isMachineConstantPoolEntry())
@@ -439,21 +439,21 @@
     break;
   }
   case ISD::TargetIndex: {
-    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
+    const auto *TI = cast<TargetIndexSDNode>(N);
     ID.AddInteger(TI->getIndex());
     ID.AddInteger(TI->getOffset());
     ID.AddInteger(TI->getTargetFlags());
     break;
   }
   case ISD::LOAD: {
-    const LoadSDNode *LD = cast<LoadSDNode>(N);
+    const auto *LD = cast<LoadSDNode>(N);
     ID.AddInteger(LD->getMemoryVT().getRawBits());
     ID.AddInteger(LD->getRawSubclassData());
     ID.AddInteger(LD->getPointerInfo().getAddrSpace());
     break;
   }
   case ISD::STORE: {
-    const StoreSDNode *ST = cast<StoreSDNode>(N);
+    const auto *ST = cast<StoreSDNode>(N);
     ID.AddInteger(ST->getMemoryVT().getRawBits());
     ID.AddInteger(ST->getRawSubclassData());
     ID.AddInteger(ST->getPointerInfo().getAddrSpace());
@@ -474,19 +474,19 @@
   case ISD::ATOMIC_LOAD_UMAX:
   case ISD::ATOMIC_LOAD:
   case ISD::ATOMIC_STORE: {
-    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
+    const auto *AT = cast<AtomicSDNode>(N);
     ID.AddInteger(AT->getMemoryVT().getRawBits());
     ID.AddInteger(AT->getRawSubclassData());
     ID.AddInteger(AT->getPointerInfo().getAddrSpace());
     break;
   }
   case ISD::PREFETCH: {
-    const MemSDNode *PF = cast<MemSDNode>(N);
+    const auto *PF = cast<MemSDNode>(N);
     ID.AddInteger(PF->getPointerInfo().getAddrSpace());
     break;
   }
   case ISD::VECTOR_SHUFFLE: {
-    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
+    const auto *SVN = cast<ShuffleVectorSDNode>(N);
     for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
          i != e; ++i)
       ID.AddInteger(SVN->getMaskElt(i));
@@ -494,7 +494,7 @@
   }
   case ISD::TargetBlockAddress:
   case ISD::BlockAddress: {
-    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
+    const auto *BA = cast<BlockAddressSDNode>(N);
     ID.AddPointer(BA->getBlockAddress());
     ID.AddInteger(BA->getOffset());
     ID.AddInteger(BA->getTargetFlags());
@@ -720,7 +720,7 @@
     Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
     break;
   case ISD::TargetExternalSymbol: {
-    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
+    auto *ESN = cast<ExternalSymbolSDNode>(N);
     Erased = TargetExternalSymbols.erase(
                std::pair(ESN->getSymbol(),
                          ESN->getTargetFlags()));
@@ -1895,9 +1895,9 @@
       break;
   }
 
-  if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2)) {
+  if (auto *N2C = dyn_cast<ConstantSDNode>(N2)) {
     const APInt &C2 = N2C->getAPIntValue();
-    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
+    if (auto *N1C = dyn_cast<ConstantSDNode>(N1)) {
       const APInt &C1 = N1C->getAPIntValue();
 
       switch (Cond) {
@@ -1915,8 +1915,8 @@
       }
     }
   }
-  if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1)) {
-    if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2)) {
+  if (auto *N1C = dyn_cast<ConstantFPSDNode>(N1)) {
+    if (auto *N2C = dyn_cast<ConstantFPSDNode>(N2)) {
       APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
       switch (Cond) {
       default: break;
@@ -2077,7 +2077,7 @@
     // by the shuffle.
     APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
     KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
-    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op);
+    const auto *SVN = cast<ShuffleVectorSDNode>(Op);
     assert(NumElts == SVN->getMask().size() && "Unexpected vector size");
     for (unsigned i = 0; i != NumElts; ++i) {
       if (!DemandedElts[i])
@@ -2133,7 +2133,7 @@
     // If we know the element index, just demand that subvector elements,
     // otherwise demand them all.
     SDValue Src = Op.getOperand(0);
-    ConstantSDNode *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+    auto *SubIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
     unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
     if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
       // Offset the demanded elts by the subvector index.
@@ -2392,7 +2392,7 @@
     break;
   }
   case ISD::LOAD: {
-    LoadSDNode *LD = cast<LoadSDNode>(Op);
+    auto *LD = cast<LoadSDNode>(Op);
     // If this is a ZEXTLoad and we are looking at the loaded value.
     if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
       EVT VT = LD->getMemoryVT();
@@ -2615,7 +2615,7 @@
       KnownZero = KnownZero.trunc(EltBitWidth);
       KnownOne = KnownOne.trunc(EltBitWidth);
     }
-    ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
+    auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
     if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
       // If we know the element index, just demand that vector element.
       unsigned Idx = ConstEltNo->getZExtValue();
@@ -2636,7 +2636,7 @@
     SDValue InVal = Op.getOperand(1);
     SDValue EltNo = Op.getOperand(2);
 
-    ConstantSDNode *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
+    auto *CEltNo = dyn_cast<ConstantSDNode>(EltNo);
     if (CEltNo && CEltNo->getAPIntValue().ult(NumElts)) {
       // If we know the element index, split the demand between the
       // source vector and the inserted element.
@@ -2853,7 +2853,7 @@
     break;
   case ISD::ROTL:
   case ISD::ROTR:
-    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
+    if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
       unsigned RotAmt = C->getZExtValue() & (VTBits-1);
 
       // Handle rotate right by N like a rotate left by 32-N.
@@ -2873,7 +2873,7 @@
     if (Tmp == 1) return 1;  // Early out.
 
     // Special case decrementing a value (ADD X, -1):
-    if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
+    if (auto *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        APInt KnownZero, KnownOne;
        computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
@@ -2967,7 +2967,7 @@
   // If we are looking at the loaded value of the SDNode.
   if (Op.getResNo() == 0) {
     // Handle LOADX separately here. EXTLOAD case will fallthrough.
-    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
+    if (auto *LD = dyn_cast<LoadSDNode>(Op)) {
       unsigned ExtType = LD->getExtensionType();
       switch (ExtType) {
       default: break;
@@ -3033,7 +3033,7 @@
     return true;
 
   // If the value is a constant, we can obviously see if it is a NaN or not.
-  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
+  if (const auto *C = dyn_cast<ConstantFPSDNode>(Op))
     return !C->getValueAPF().isNaN();
 
   // TODO: Recognize more cases here.
@@ -3043,14 +3043,14 @@
 bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
   // If the value is a constant, we can obviously see if it is a zero or not.
-  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
+  if (const auto *C = dyn_cast<ConstantFPSDNode>(Op))
     return !C->isZero();
 
   // TODO: Recognize more cases here.
   switch (Op.getOpcode()) {
   default: break;
   case ISD::OR:
-    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
+    if (const auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
       return !C->isNullValue();
     break;
   }
@@ -3063,8 +3063,8 @@
   if (A == B) return true;
 
   // For negative and positive zero.
-  if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
-    if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
+  if (const auto *CA = dyn_cast<ConstantFPSDNode>(A))
+    if (const auto *CB = dyn_cast<ConstantFPSDNode>(B))
       if (CA->isZero() && CB->isZero()) return true;
 
   // Otherwise they may not be equal.
@@ -3153,7 +3153,7 @@
   // doesn't create new constants with different values. Nevertheless, the
   // opaque flag is preserved during folding to prevent future folding with
   // other constants.
-  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand)) {
+  if (auto *C = dyn_cast<ConstantSDNode>(Operand)) {
     const APInt &Val = C->getAPIntValue();
     switch (Opcode) {
     default: break;
@@ -3202,7 +3202,7 @@
   }
 
   // Constant fold unary operations with a floating point constant operand.
-  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand)) {
+  if (auto *C = dyn_cast<ConstantFPSDNode>(Operand)) {
     APFloat V = C->getValueAPF();    // make copy
     switch (Opcode) {
     case ISD::FNEG:
@@ -3263,7 +3263,7 @@
   }
 
   // Constant fold unary operations with a vector integer or float operand.
-  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
+  if (auto *BV = dyn_cast<BuildVectorSDNode>(Operand)) {
     if (BV->isConstant()) {
       switch (Opcode) {
       default:
@@ -3534,7 +3534,7 @@
     return SDValue();
   if (!TLI->isOffsetFoldingLegal(GA))
     return SDValue();
-  const ConstantSDNode *Cst2 = dyn_cast<ConstantSDNode>(N2);
+  const auto *Cst2 = dyn_cast<ConstantSDNode>(N2);
   if (!Cst2)
     return SDValue();
   int64_t Offset = Cst2->getSExtValue();
@@ -3557,8 +3557,8 @@
     return SDValue();
 
   // Handle the case of two scalars.
-  if (const ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) {
-    if (const ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) {
+  if (const auto *Scalar1 = dyn_cast<ConstantSDNode>(Cst1)) {
+    if (const auto *Scalar2 = dyn_cast<ConstantSDNode>(Cst2)) {
       SDValue Folded = FoldConstantArithmetic(Opcode, DL, VT, Scalar1, Scalar2);
       assert((!Folded || !VT.isVector()) &&
              "Can't fold vectors ops with scalar operands");
@@ -3567,16 +3567,16 @@
   }
 
   // fold (add Sym, c) -> Sym+c
-  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst1))
+  if (auto *GA = dyn_cast<GlobalAddressSDNode>(Cst1))
     return FoldSymbolOffset(Opcode, VT, GA, Cst2);
   if (isCommutativeBinOp(Opcode))
-    if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Cst2))
+    if (auto *GA = dyn_cast<GlobalAddressSDNode>(Cst2))
       return FoldSymbolOffset(Opcode, VT, GA, Cst1);
 
   // For vectors extract each constant element into Inputs so we can constant
   // fold them individually.
-  BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
-  BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
+  auto *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
+  auto *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
   if (!BV1 || !BV2)
     return SDValue();
@@ -3635,7 +3635,7 @@
   };
 
   auto IsConstantBuildVectorOrUndef = [&](const SDValue &Op) {
-    BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op);
+    auto *BV = dyn_cast<BuildVectorSDNode>(Op);
     return (Op.isUndef()) || (Op.getOpcode() == ISD::CONDCODE) ||
            (BV && BV->isConstant());
   };
@@ -3666,7 +3666,7 @@
     SmallVector ScalarOps;
     for (SDValue Op : Ops) {
       EVT InSVT = Op.getValueType().getScalarType();
-      BuildVectorSDNode *InBV = dyn_cast<BuildVectorSDNode>(Op);
+      auto *InBV = dyn_cast<BuildVectorSDNode>(Op);
       if (!InBV) {
         // We've checked that this is UNDEF or a constant of some kind.
         if (Op.isUndef())
@@ -3707,10 +3707,10 @@
 SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
                               SDValue N1, SDValue N2,
                               const SDNodeFlags *Flags) {
-  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
-  ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
-  ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
-  ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
+  auto *N1C = dyn_cast<ConstantSDNode>(N1);
+  auto *N2C = dyn_cast<ConstantSDNode>(N2);
+  auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
+  auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
 
   // Canonicalize constant to RHS if commutative.
   if (isCommutativeBinOp(Opcode)) {
@@ -3907,7 +3907,7 @@
         Ops.push_back(getUNDEF(VT.getScalarType()));
         continue;
       }
-      if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
         APInt Val = C->getAPIntValue();
         Val = Val.zextOrTrunc(VT.getScalarSizeInBits());
         Ops.push_back(SignExtendInReg(Val));
@@ -3963,7 +3963,7 @@
       // if the indices are known different, extract the element from
      // the original vector.
       SDValue N1Op2 = N1.getOperand(2);
-      ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
+      auto *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2);
 
       if (N1Op2C && N2C) {
         if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
@@ -4190,9 +4190,9 @@
   // Perform various simplifications.
   switch (Opcode) {
   case ISD::FMA: {
-    ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
-    ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
-    ConstantFPSDNode *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
+    auto *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
+    auto *N2CFP = dyn_cast<ConstantFPSDNode>(N2);
+    auto *N3CFP = dyn_cast<ConstantFPSDNode>(N3);
     if (N1CFP && N2CFP && N3CFP) {
       APFloat V1 = N1CFP->getValueAPF();
       const APFloat &V2 = N2CFP->getValueAPF();
@@ -4222,7 +4222,7 @@
     break;
   }
   case ISD::SELECT:
-    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
+    if (auto *N1C = dyn_cast<ConstantSDNode>(N1)) {
      if (N1C->getZExtValue())
        return N2;             // select true, X, Y -> X
      return N3;             // select false, X, Y -> Y
    }
@@ -4233,7 +4233,7 @@
   case ISD::VECTOR_SHUFFLE:
     llvm_unreachable("should use getVectorShuffle constructor!");
   case ISD::INSERT_VECTOR_ELT: {
-    ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3);
+    auto *N3C = dyn_cast<ConstantSDNode>(N3);
     // INSERT_VECTOR_ELT into out-of-bounds element is an UNDEF
     if (N3C && N3C->getZExtValue() >= N1.getValueType().getVectorNumElements())
       return getUNDEF(VT);
@@ -4319,8 +4319,8 @@
   // Add a chain value for each stack argument.
   for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
        UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
-    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
-      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
+    if (auto *L = dyn_cast<LoadSDNode>(*U))
+      if (auto *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
         if (FI->getIndex() < 0)
           ArgChains.push_back(SDValue(L, 1));
@@ -4335,7 +4335,7 @@
   assert(!Value.isUndef());
 
   unsigned NumBits = VT.getScalarSizeInBits();
-  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
+  if (auto *C = dyn_cast<ConstantSDNode>(Value)) {
     assert(C->getAPIntValue().getBitWidth() == 8);
     APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
     if (VT.isInteger())
@@ -4570,7 +4570,7 @@
   MachineFunction &MF = DAG.getMachineFunction();
   MachineFrameInfo &MFI = MF.getFrameInfo();
   bool OptSize = shouldLowerMemFuncForSize(MF);
-  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
+  auto *FI = dyn_cast<FrameIndexSDNode>(Dst);
   if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
     DstAlignCanChange = true;
   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
@@ -4686,7 +4686,7 @@
   MachineFunction &MF = DAG.getMachineFunction();
   MachineFrameInfo &MFI = MF.getFrameInfo();
   bool OptSize = shouldLowerMemFuncForSize(MF);
-  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
+  auto *FI = dyn_cast<FrameIndexSDNode>(Dst);
   if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
     DstAlignCanChange = true;
   unsigned SrcAlign = DAG.InferPtrAlignment(Src);
@@ -4783,7 +4783,7 @@
   MachineFunction &MF = DAG.getMachineFunction();
   MachineFrameInfo &MFI = MF.getFrameInfo();
   bool OptSize = shouldLowerMemFuncForSize(MF);
-  FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
+  auto *FI = dyn_cast<FrameIndexSDNode>(Dst);
   if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
     DstAlignCanChange = true;
   bool IsZeroVal =
@@ -4869,7 +4869,7 @@
   // Check to see if we should lower the memcpy to loads and stores first.
   // For cases within the target-specified limits, this is the best choice.
-  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
+  auto *ConstantSize = dyn_cast<ConstantSDNode>(Size);
   if (ConstantSize) {
     // Memcpy with size zero? Just return the original chain.
     if (ConstantSize->isNullValue())
@@ -4942,7 +4942,7 @@
   // Check to see if we should lower the memmove to loads and stores first.
   // For cases within the target-specified limits, this is the best choice.
-  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
+  auto *ConstantSize = dyn_cast<ConstantSDNode>(Size);
   if (ConstantSize) {
     // Memmove with size zero? Just return the original chain.
     if (ConstantSize->isNullValue())
@@ -5002,7 +5002,7 @@
   // Check to see if we should lower the memset to stores first.
   // For cases within the target-specified limits, this is the best choice.
-  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
+  auto *ConstantSize = dyn_cast<ConstantSDNode>(Size);
   if (ConstantSize) {
     // Memset with size zero? Just return the original chain.
     if (ConstantSize->isNullValue())
@@ -5261,7 +5261,7 @@
 static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr,
                                            int64_t Offset = 0) {
   // If this is FI+Offset, we can model it.
-  if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
+  if (const auto *FI = dyn_cast<FrameIndexSDNode>(Ptr))
     return MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
                                              FI->getIndex(), Offset);
@@ -5284,7 +5284,7 @@
 static MachinePointerInfo InferPointerInfo(SelectionDAG &DAG, SDValue Ptr,
                                            SDValue OffsetOp) {
   // If the 'Offset' value isn't a constant, we can't handle this.
-  if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
+  if (auto *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
     return InferPointerInfo(DAG, Ptr, OffsetNode->getSExtValue());
   if (OffsetOp.isUndef())
     return InferPointerInfo(DAG, Ptr);
@@ -5402,7 +5402,7 @@
 SDValue SelectionDAG::getIndexedLoad(SDValue OrigLoad, const SDLoc &dl,
                                      SDValue Base, SDValue Offset,
                                      ISD::MemIndexedMode AM) {
-  LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
+  auto *LD = cast<LoadSDNode>(OrigLoad);
   assert(LD->getOffset().isUndef() && "Load is already a indexed load!");
   // Don't propagate the invariant or dereferenceable flags.
   auto MMOFlags =
@@ -5531,7 +5531,7 @@
 SDValue SelectionDAG::getIndexedStore(SDValue OrigStore, const SDLoc &dl,
                                       SDValue Base, SDValue Offset,
                                       ISD::MemIndexedMode AM) {
-  StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
+  auto *ST = cast<StoreSDNode>(OrigStore);
   assert(ST->getOffset().isUndef() && "Store is already a indexed store!");
   SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
   SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
@@ -6194,7 +6194,7 @@
   }
 
   // For MachineNode, initialize the memory references information.
-  if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N))
+  if (auto *MN = dyn_cast<MachineSDNode>(N))
     MN->setMemRefs(nullptr, nullptr);
 
   // Swap for an appropriately sized array from the recycler.
@@ -6855,22 +6855,22 @@
 //===----------------------------------------------------------------------===//
 
 bool llvm::isNullConstant(SDValue V) {
-  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
+  auto *Const = dyn_cast<ConstantSDNode>(V);
   return Const != nullptr && Const->isNullValue();
 }
 
 bool llvm::isNullFPConstant(SDValue V) {
-  ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
+  auto *Const = dyn_cast<ConstantFPSDNode>(V);
   return Const != nullptr && Const->isZero() && !Const->isNegative();
 }
 
 bool llvm::isAllOnesConstant(SDValue V) {
-  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
+  auto *Const = dyn_cast<ConstantSDNode>(V);
   return Const != nullptr && Const->isAllOnesValue();
 }
 
 bool llvm::isOneConstant(SDValue V) {
-  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
+  auto *Const = dyn_cast<ConstantSDNode>(V);
   return Const != nullptr && Const->isOne();
 }
 
@@ -6879,10 +6879,10 @@
 }
 
 ConstantSDNode *llvm::isConstOrConstSplat(SDValue N) {
-  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
+  if (auto *CN = dyn_cast<ConstantSDNode>(N))
     return CN;
 
-  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
+  if (auto *BV = dyn_cast<BuildVectorSDNode>(N)) {
     BitVector UndefElements;
     ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
@@ -6898,10 +6898,10 @@
 }
 
 ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N) {
-  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
+  if (auto *CN = dyn_cast<ConstantFPSDNode>(N))
    return CN;
 
-  if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
+  if (auto *BV = dyn_cast<BuildVectorSDNode>(N)) {
     BitVector UndefElements;
     ConstantFPSDNode *CN = BV->getConstantFPSplatNode(&UndefElements);
@@ -7066,7 +7066,7 @@
   }
 
   // Loads don't have side effects, look through them.
-  if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
+  if (auto *Ld = dyn_cast<LoadSDNode>(*this)) {
     if (!Ld->isVolatile())
       return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
   }
@@ -7246,7 +7246,7 @@
   // stack slot's alignment.
   int FrameIdx = 1 << 31;
   int64_t FrameOffset = 0;
-  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
+  if (auto *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
     FrameIdx = FI->getIndex();
   } else if (isBaseWithConstantOffset(Ptr) &&
             isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
@@ -7356,10 +7356,10 @@
     if (OpVal.isUndef())
       SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
-    else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
+    else if (auto *CN = dyn_cast<ConstantSDNode>(OpVal))
       SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
                     zextOrTrunc(sz) << BitPos;
-    else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
+    else if (auto *CN = dyn_cast<ConstantFPSDNode>(OpVal))
       SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz)
                     << BitPos;
@@ ... @@
-  if (ConstantFPSDNode *CN =
-          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
+  if (const auto *CN =
+          dyn_cast_or_null<ConstantFPSDNode>(getSplatValue(UndefElements))) {
     bool IsExact;
     APSInt IntVal(BitWidth);
@@ -7481,7 +7481,7 @@
     return N.getNode();
   // Treat a GlobalAddress supporting constant offset folding as a
   // constant integer.
-  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N))
+  if (auto *GA = dyn_cast<GlobalAddressSDNode>(N))
     if (GA->getOpcode() == ISD::GlobalAddress &&
         TLI->isOffsetFoldingLegal(GA))
       return GA;
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -256,12 +256,12 @@
 static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                               const Twine &ErrMsg) {
-  const Instruction *I = dyn_cast_or_null<Instruction>(V);
+  const auto *I = dyn_cast_or_null<Instruction>(V);
   if (!V)
     return Ctx.emitError(ErrMsg);
 
   const char *AsmError = ", possible invalid constraint for vector type";
-  if (const CallInst *CI = dyn_cast<CallInst>(I))
+  if (const auto *CI = dyn_cast<CallInst>(I))
     if (isa<InlineAsm>(CI->getCalledValue()))
       return Ctx.emitError(I, ErrMsg + AsmError);
 
@@ -1064,13 +1064,13 @@
 SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
-  if (const Constant *C = dyn_cast<Constant>(V)) {
+  if (const auto *C = dyn_cast<Constant>(V)) {
     EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
 
-    if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
+    if (const auto *CI = dyn_cast<ConstantInt>(C))
      return DAG.getConstant(*CI, getCurSDLoc(), VT);
 
-    if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
+    if (const auto *GV = dyn_cast<GlobalValue>(C))
      return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
 
     if (isa<ConstantPointerNull>(C)) {
@@ -1079,13 +1079,13 @@
                              TLI.getPointerTy(DAG.getDataLayout(), AS));
     }
 
-    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
+    if (const auto *CFP = dyn_cast<ConstantFP>(C))
      return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
 
     if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return DAG.getUNDEF(VT);
 
-    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+    if (const auto *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the NodeMap!");
@@ -1108,7 +1108,7 @@
       return DAG.getMergeValues(Constants, getCurSDLoc());
     }
 
-    if (const ConstantDataSequential *CDS =
+    if (const auto *CDS =
          dyn_cast<ConstantDataSequential>(C)) {
      SmallVector Ops;
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
@@ -1148,16 +1148,16 @@
       return DAG.getMergeValues(Constants, getCurSDLoc());
     }
 
-    if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
+    if (const auto *BA = dyn_cast<BlockAddress>(C))
      return DAG.getBlockAddress(BA, VT);
 
-    VectorType *VecTy = cast<VectorType>(V->getType());
+    auto *VecTy = cast<VectorType>(V->getType());
     unsigned NumElements = VecTy->getNumElements();
 
     // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
     SmallVector Ops;
-    if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
+    if (const auto *CV = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CV->getOperand(i)));
     } else {
@@ -1179,7 +1179,7 @@
 
   // If this is a static alloca, generate it as the frameindex instead of
   // computation.
-  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
+  if (const auto *AI = dyn_cast<AllocaInst>(V)) {
     DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
     if (SI != FuncInfo.StaticAllocaMap.end())
@@ -1188,7 +1188,7 @@
   }
 
   // If this is an instruction which fast-isel has deferred, select it now.
-  if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
+  if (const auto *Inst = dyn_cast<Instruction>(V)) {
     unsigned InReg = FuncInfo.InitializeRegForValue(Inst);
     RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
                      Inst->getType());
@@ -1512,7 +1512,7 @@
                                                      const BasicBlock *FromBB) {
   // The operands of the setcc have to be in this block.  We don't know
   // how to export them from some other block.
-  if (const Instruction *VI = dyn_cast<Instruction>(V)) {
+  if (const auto *VI = dyn_cast<Instruction>(V)) {
     // Can export from current BB.
     if (VI->getParent() == FromBB)
       return true;
@@ -1565,7 +1565,7 @@
 }
 
 static bool InBlock(const Value *V, const BasicBlock *BB) {
-  if (const Instruction *I = dyn_cast<Instruction>(V))
+  if (const auto *I = dyn_cast<Instruction>(V))
     return I->getParent() == BB;
   return true;
 }
@@ -1586,7 +1586,7 @@
   // If the leaf of the tree is a comparison, merge the condition into
   // the caseblock.
-  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
+  if (const auto *BOp = dyn_cast<CmpInst>(Cond)) {
     // The operands of the cmp have to be in this block.  We don't know
    // how to export them from some other block.  If this is the first block
    // of the sequence, no exporting is needed.
@@ -1594,10 +1594,10 @@
         (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
          isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
       ISD::CondCode Condition;
-      if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
+      if (const auto *IC = dyn_cast<ICmpInst>(Cond)) {
         Condition = getICmpCondCode(IC->getPredicate());
       } else {
-        const FCmpInst *FC = cast<FCmpInst>(Cond);
+        const auto *FC = cast<FCmpInst>(Cond);
         Condition = getFCmpCondCode(FC->getPredicate());
         if (TM.Options.NoNaNsFPMath)
           Condition = getFCmpCodeWithoutNaN(Condition);
@@ -1626,7 +1626,7 @@
                                                   BranchProbability TProb,
                                                   BranchProbability FProb) {
   // If this node is not part of the or/and tree, emit it as a branch.
-  const Instruction *BOp = dyn_cast<Instruction>(Cond);
+  const auto *BOp = dyn_cast<Instruction>(Cond);
   if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
       (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
       BOp->getParent() != CurBB->getBasicBlock() ||
@@ -1784,7 +1784,7 @@
   //     cmp D, E
   //     jle foo
   //
-  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
+  if (const auto *BOp = dyn_cast<BinaryOperator>(CondVal)) {
     Instruction::BinaryOps Opcode = BOp->getOpcode();
     if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() &&
         !I.getMetadata(LLVMContext::MD_unpredictable) &&
@@ -2227,7 +2227,7 @@
          "Cannot lower invokes with arbitrary operand bundles yet!");
 
   const Value *Callee(I.getCalledValue());
-  const Function *Fn = dyn_cast<Function>(Callee);
+  const auto *Fn = dyn_cast<Function>(Callee);
   if (isa<InlineAsm>(Callee))
     visitInlineAsm(&I);
   else if (Fn && Fn->isIntrinsic()) {
@@ -2430,7 +2430,7 @@
 /// we have the freedom to alter the elements in the result as long as the
 /// reduction of them stays unchanged.
 static bool isVectorReductionOp(const User *I) {
-  const Instruction *Inst = dyn_cast<Instruction>(I);
+  const auto *Inst = dyn_cast<Instruction>(I);
   if (!Inst || !Inst->getType()->isVectorTy())
     return false;
@@ -2444,7 +2444,7 @@
     break;
   case Instruction::FAdd:
   case Instruction::FMul:
-    if (const FPMathOperator *FPOp = dyn_cast<FPMathOperator>(Inst))
+    if (const auto *FPOp = dyn_cast<FPMathOperator>(Inst))
      if (FPOp->getFastMathFlags().unsafeAlgebra())
        break;
     LLVM_FALLTHROUGH;
@@ -2490,11 +2490,11 @@
       return false;
 
     if (Inst->getOpcode() == OpCode || isa<PHINode>(U)) {
-      if (const FPMathOperator *FPOp = dyn_cast<FPMathOperator>(Inst))
+      if (const auto *FPOp = dyn_cast<FPMathOperator>(Inst))
        if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().unsafeAlgebra())
          return false;
       UsersToVisit.push_back(U);
-    } else if (const ShuffleVectorInst *ShufInst =
+    } else if (const auto *ShufInst =
               dyn_cast<ShuffleVectorInst>(U)) {
       // Detect the following pattern: A ShuffleVector instruction together
      // with a reduction that do partial reduction on the first and second
@@ -2537,7 +2537,7 @@
       if (ElemNumToReduce != 1)
        return false;
 
-      const ConstantInt *Val = dyn_cast<ConstantInt>(U->getOperand(1));
+      const auto *Val = dyn_cast<ConstantInt>(U->getOperand(1));
       if (!Val || Val->getZExtValue() != 0)
        return false;
@@ -2559,15 +2559,15 @@
   bool vec_redux = false;
   FastMathFlags FMF;
 
-  if (const OverflowingBinaryOperator *OFBinOp =
+  if (const auto *OFBinOp =
          dyn_cast<OverflowingBinaryOperator>(&I)) {
     nuw = OFBinOp->hasNoUnsignedWrap();
     nsw = OFBinOp->hasNoSignedWrap();
   }
-  if (const PossiblyExactOperator *ExactOp =
+  if (const auto *ExactOp =
         dyn_cast<PossiblyExactOperator>(&I))
     exact = ExactOp->isExact();
-  if (const FPMathOperator *FPOp = dyn_cast<FPMathOperator>(&I))
+  if (const auto *FPOp = dyn_cast<FPMathOperator>(&I))
     FMF = FPOp->getFastMathFlags();
 
   if (isVectorReductionOp(&I)) {
@@ -2627,12 +2627,12 @@
   if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
 
-    if (const OverflowingBinaryOperator *OFBinOp =
+    if (const auto *OFBinOp =
           dyn_cast<OverflowingBinaryOperator>(&I)) {
       nuw = OFBinOp->hasNoUnsignedWrap();
       nsw = OFBinOp->hasNoSignedWrap();
     }
-    if (const PossiblyExactOperator *ExactOp =
+    if (const auto *ExactOp =
          dyn_cast<PossiblyExactOperator>(&I))
       exact = ExactOp->isExact();
   }
@@ -2658,9 +2658,9 @@
 void SelectionDAGBuilder::visitICmp(const User &I) {
   ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
-  if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
+  if (const auto *IC = dyn_cast<ICmpInst>(&I))
     predicate = IC->getPredicate();
-  else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
+  else if (const auto *IC = dyn_cast<ConstantExpr>(&I))
     predicate = ICmpInst::Predicate(IC->getPredicate());
   SDValue Op1 = getValue(I.getOperand(0));
   SDValue Op2 = getValue(I.getOperand(1));
@@ -2673,9 +2673,9 @@
 void SelectionDAGBuilder::visitFCmp(const User &I) {
   FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
-  if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
+  if (const auto *FC = dyn_cast<FCmpInst>(&I))
     predicate = FC->getPredicate();
-  else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
+  else if (const auto *FC = dyn_cast<ConstantExpr>(&I))
     predicate = FCmpInst::Predicate(FC->getPredicate());
   SDValue Op1 = getValue(I.getOperand(0));
   SDValue Op2 = getValue(I.getOperand(1));
@@ -2915,7 +2915,7 @@
   // might fold any kind of constant expression to an integer constant and that
   // is not what we are looking for. Only recognize a bitcast of a genuine
   // constant integer as an opaque constant.
-  else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
+  else if(auto *C = dyn_cast<ConstantInt>(I.getOperand(0)))
     setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
                                  /*isOpaque*/true));
   else
@@ -3421,12 +3421,12 @@
   if (TLI.supportSwiftError()) {
     // Swifterror values can come from either a function parameter with
     // swifterror attribute or an alloca with swifterror attribute.
-    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
+    if (const auto *Arg = dyn_cast<Argument>(SV)) {
       if (Arg->hasSwiftErrorAttr())
         return visitLoadFromSwiftError(I);
     }
 
-    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
+    if (const auto *Alloca = dyn_cast<AllocaInst>(SV)) {
       if (Alloca->isSwiftError())
         return visitLoadFromSwiftError(I);
     }
@@ -3600,12 +3600,12 @@
   if (TLI.supportSwiftError()) {
     // Swifterror values can come from either a function parameter with
     // swifterror attribute or an alloca with swifterror attribute.
-    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
+    if (const auto *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
        return visitStoreToSwiftError(I);
     }
 
-    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
+    if (const auto *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())
        return visitStoreToSwiftError(I);
     }
@@ -3740,7 +3740,7 @@
   LLVMContext &Context = *DAG.getContext();
 
   assert(Ptr->getType()->isVectorTy() && "Uexpected pointer type");
-  const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
+  const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
   if (!GEP || GEP->getNumOperands() > 2)
     return false;
@@ -3761,7 +3761,7 @@
   Index = SDB->getValue(IndexVal);
 
   // Suppress sign extension.
-  if (SExtInst* Sext = dyn_cast<SExtInst>(IndexVal)) {
+  if (auto * Sext = dyn_cast<SExtInst>(IndexVal)) {
     if (SDB->findValue(Sext->getOperand(0))) {
       IndexVal = Sext->getOperand(0);
       Index = SDB->getValue(IndexVal);
@@ -4128,7 +4128,7 @@
   }
 
   if (!I.getType()->isVoidTy()) {
-    if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
+    if (auto *PTy = dyn_cast<VectorType>(I.getType())) {
      EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
      Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
     } else
@@ -4596,7 +4596,7 @@
   bool IsExp10 = false;
   if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
       LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
-    if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
+    if (auto *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
      APFloat Ten(10.0f);
      IsExp10 = LHSC->isExactlyValue(Ten);
     }
@@ -4626,7 +4626,7 @@
   // otherwise we end up lowering to a call to __powidf2 (for example).  When
   // optimizing for size, we only want to do this if the expansion would produce
   // a small number of multiplies, otherwise we do the full expansion.
-  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
+  if (auto *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
     // Get the exponent as a positive value.
     unsigned Val = RHSC->getSExtValue();
     if ((int)Val < 0) Val = -Val;
@@ -4695,7 +4695,7 @@
 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
     const Value *V, DILocalVariable *Variable, DIExpression *Expr,
     DILocation *DL, int64_t Offset, bool IsIndirect, const SDValue &N) {
-  const Argument *Arg = dyn_cast<Argument>(V);
+  const auto *Arg = dyn_cast<Argument>(V);
   if (!Arg)
     return false;
 
@@ -4734,8 +4734,8 @@
   if (!Op && N.getNode())
     // Check if frame index is available.
-    if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
-      if (FrameIndexSDNode *FINode =
+    if (auto *LNode = dyn_cast<LoadSDNode>(N.getNode()))
+      if (auto *FINode =
             dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
        Op = MachineOperand::CreateFI(FINode->getIndex());
@@ -4897,7 +4897,7 @@
     return nullptr;
   }
   case Intrinsic::dbg_declare: {
-    const DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
+    const auto &DI = cast<DbgDeclareInst>(I);
     DILocalVariable *Variable = DI.getVariable();
     DIExpression *Expression = DI.getExpression();
     const Value *Address = DI.getAddress();
@@ -4920,7 +4920,7 @@
       N = UnusedArgNodeMap[Address];
     SDDbgValue *SDV;
     if (N.getNode()) {
-      if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
+      if (const auto *BCI = dyn_cast<BitCastInst>(Address))
        Address = BCI->getOperand(0);
      // Parameters are handled specially.
      bool isParameter = Variable->isParameter() || isa<Argument>(Address);
@@ -4947,7 +4947,7 @@
                                     N)) {
        // If variable is pinned by a alloca in dominating bb then
        // use StaticAllocaMap.
-        if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
+        if (const auto *AI = dyn_cast<AllocaInst>(Address)) {
          if (AI->getParent() != DI.getParent()) {
            DenseMap<const AllocaInst*, int>::iterator SI =
              FuncInfo.StaticAllocaMap.find(AI);
@@ -4965,7 +4965,7 @@
     return nullptr;
   }
   case Intrinsic::dbg_value: {
-    const DbgValueInst &DI = cast<DbgValueInst>(I);
+    const auto &DI = cast<DbgValueInst>(I);
     assert(DI.getVariable() && "Missing variable");
 
     DILocalVariable *Variable = DI.getVariable();
@@ -5006,9 +5006,9 @@
     }
 
     // Build a debug info table entry.
-    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
+    if (const auto *BCI = dyn_cast<BitCastInst>(V))
      V = BCI->getOperand(0);
-    const AllocaInst *AI = dyn_cast<AllocaInst>(V);
+    const auto *AI = dyn_cast<AllocaInst>(V);
     // Don't handle byval struct arguments or VLAs, for example.
     if (!AI) {
      DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
@@ -5051,7 +5051,7 @@
   }
   case Intrinsic::eh_sjlj_callsite: {
     MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
-    ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
+    auto *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
     assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
     assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
 
@@ -5061,7 +5061,7 @@
   case Intrinsic::eh_sjlj_functioncontext: {
     // Get and store the index of the function context.
     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
-    AllocaInst *FnCtx =
+    auto *FnCtx =
      cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
     int FI = FuncInfo.StaticAllocaMap[FnCtx];
     MFI.setFunctionContextIndex(FI);
@@ -5349,7 +5349,7 @@
     return nullptr;
   case Intrinsic::cttz: {
     SDValue Arg = getValue(I.getArgOperand(0));
-    ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
+    auto *CI = cast<ConstantInt>(I.getArgOperand(1));
     EVT Ty = Arg.getValueType();
     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
                              sdl, Ty, Arg));
@@ -5357,7 +5357,7 @@
   }
   case Intrinsic::ctlz: {
     SDValue Arg = getValue(I.getArgOperand(0));
-    ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
+    auto *CI = cast<ConstantInt>(I.getArgOperand(1));
     EVT Ty = Arg.getValueType();
     setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
                              sdl, Ty, Arg));
@@ -5428,7 +5428,7 @@
     else
      Src = getValue(I.getArgOperand(0));   // The guard's value.
 
-    AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
+    auto *Slot = cast<AllocaInst>(I.getArgOperand(1));
 
     int FI = FuncInfo.StaticAllocaMap[Slot];
     MFI.setStackProtectorIndex(FI);
@@ -5445,7 +5445,7 @@
   }
   case Intrinsic::objectsize: {
     // If we don't know by now, we're never going to know.
-    ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
+    auto *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
 
     assert(CI && "Non-constant type in __builtin_object_size?");
 
@@ -5472,7 +5472,7 @@
     return nullptr;
 
   case Intrinsic::init_trampoline: {
-    const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
+    const auto *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
 
     SDValue Ops[6];
     Ops[0] = getRoot();
@@ -5501,9 +5501,9 @@
            "only valid in functions with gc specified, enforced by Verifier");
     assert(GFI && "implied by previous");
     const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
-    const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
+    const auto *TypeMap = cast<Constant>(I.getArgOperand(1));
 
-    FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
+    auto *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
     GFI->addStackRoot(FI->getIndex(), TypeMap);
     return nullptr;
   }
@@ -5599,7 +5599,7 @@
     for (SmallVectorImpl::iterator Object = Allocas.begin(),
         E = Allocas.end(); Object != E; ++Object) {
-      AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
+      auto *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);
 
      // Could not find an Alloca.
      if (!LifetimeObject)
@@ -5671,7 +5671,7 @@
       Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
      if (isa<ConstantPointerNull>(Arg))
        continue; // Skip null pointers. They represent a hole in index space.
-      AllocaInst *Slot = cast<AllocaInst>(Arg);
+      auto *Slot = cast<AllocaInst>(Arg);
      assert(FuncInfo.StaticAllocaMap.count(Slot) &&
             "can only escape static allocas");
      int FI = FuncInfo.StaticAllocaMap[Slot];
@@ -5894,9 +5894,9 @@
 /// value is equal or not-equal to zero.
 static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
   for (const User *U : V->users()) {
-    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
+    if (const auto *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
-        if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
+        if (const auto *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
     // Unknown instruction.
@@ -5911,7 +5911,7 @@
   // Check to see if this load can be trivially constant folded, e.g. if the
   // input is from a string literal.
-  if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
+  if (const auto *LoadInput = dyn_cast<Constant>(PtrVal)) {
     // Cast pointer to the type we really want to load.
     LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
                                          PointerType::getUnqual(LoadTy));
@@ -5974,7 +5974,7 @@
     return false;
 
   const Value *Size = I.getArgOperand(2);
-  const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
+  const auto *CSize = dyn_cast<ConstantInt>(Size);
   if (CSize && CSize->getZExtValue() == 0) {
     EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                           I.getType(), true);
@@ -6529,14 +6529,14 @@
       // If this is an indirect operand, the operand is a pointer to the
      // accessed type.
       if (isIndirect) {
-        llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
+        auto *PtrTy = dyn_cast<PointerType>(OpTy);
        if (!PtrTy)
          report_fatal_error("Indirect operand for inline asm not a pointer!");
        OpTy = PtrTy->getElementType();
       }
 
      // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
-      if (StructType *STy = dyn_cast<StructType>(OpTy))
+      if (auto *STy = dyn_cast<StructType>(OpTy))
        if (STy->getNumElements() == 1)
          OpTy = STy->getElementType(0);
 
@@ -6780,7 +6780,7 @@
   public:
     explicit ExtraFlags(ImmutableCallSite CS) {
-      const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
+      const auto *IA = cast<InlineAsm>(CS.getCalledValue());
      if (IA->hasSideEffects())
        Flags |= InlineAsm::Extra_HasSideEffects;
      if (IA->isAlignStack())
@@ -6812,7 +6812,7 @@
 /// visitInlineAsm - Handle a call to an InlineAsm object.
/// void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) { - const InlineAsm *IA = cast(CS.getCalledValue()); + const auto *IA = cast(CS.getCalledValue()); /// ConstraintOperands - Information about all of the constraints. SDISelAsmOperandInfoVector ConstraintOperands; @@ -6841,7 +6841,7 @@ // Process the call argument. BasicBlocks are labels, currently appearing // only in asm's. - if (const BasicBlock *BB = dyn_cast(OpInfo.CallOperandVal)) { + if (const auto *BB = dyn_cast(OpInfo.CallOperandVal)) { OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]); } else { OpInfo.CallOperand = getValue(OpInfo.CallOperandVal); @@ -6857,7 +6857,7 @@ // The return value of the call is this value. As such, there is no // corresponding argument. assert(!CS.getType()->isVoidTy() && "Bad inline asm!"); - if (StructType *STy = dyn_cast(CS.getType())) { + if (auto *STy = dyn_cast(CS.getType())) { OpVT = TLI.getSimpleValueType(DAG.getDataLayout(), STy->getElementType(ResNo)); } else { @@ -7377,12 +7377,12 @@ SelectionDAGBuilder &Builder) { for (unsigned i = StartIdx, e = CS.arg_size(); i != e; ++i) { SDValue OpVal = Builder.getValue(CS.getArgument(i)); - if (ConstantSDNode *C = dyn_cast(OpVal)) { + if (auto *C = dyn_cast(OpVal)) { Ops.push_back( Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64)); Ops.push_back( Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64)); - } else if (FrameIndexSDNode *FI = dyn_cast(OpVal)) { + } else if (auto *FI = dyn_cast(OpVal)) { const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo(); Ops.push_back(Builder.DAG.getTargetFrameIndex( FI->getIndex(), TLI.getPointerTy(Builder.DAG.getDataLayout()))); @@ -7752,7 +7752,7 @@ Flags.setByVal(); } if (Args[i].isByVal || Args[i].isInAlloca) { - PointerType *Ty = cast(Args[i].Ty); + auto *Ty = cast(Args[i].Ty); Type *ElementTy = Ty->getElementType(); Flags.setByValSize(DL.getTypeAllocSize(ElementTy)); // For ByVal, alignment should come from FE. BE will guess if this @@ -8044,7 +8044,7 @@ Flags.setByVal(); } if (Flags.isByVal() || Flags.isInAlloca()) { - PointerType *Ty = cast(I->getType()); + auto *Ty = cast(I->getType()); Type *ElementTy = Ty->getElementType(); Flags.setByValSize(DL.getTypeAllocSize(ElementTy)); // For ByVal, alignment should be passed from FE. BE will guess if @@ -8149,7 +8149,7 @@ SDB->setUnusedArgValue(&*I, InVals[i]); // Also remember any frame index for use in FastISel. - if (FrameIndexSDNode *FI = + if (auto *FI = dyn_cast(InVals[i].getNode())) FuncInfo->setArgumentFrameIndex(&*I, FI->getIndex()); } @@ -8182,7 +8182,7 @@ continue; // Note down frame index. - if (FrameIndexSDNode *FI = + if (auto *FI = dyn_cast(ArgValues[0].getNode())) FuncInfo->setArgumentFrameIndex(&*I, FI->getIndex()); @@ -8191,9 +8191,9 @@ SDB->setValue(&*I, Res); if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) { - if (LoadSDNode *LNode = + if (auto *LNode = dyn_cast(Res.getOperand(0).getNode())) - if (FrameIndexSDNode *FI = + if (auto *FI = dyn_cast(LNode->getBasePtr().getNode())) FuncInfo->setArgumentFrameIndex(&*I, FI->getIndex()); } @@ -8262,7 +8262,7 @@ // nodes and Machine PHI nodes, but the incoming operands have not been // emitted yet. for (BasicBlock::const_iterator I = SuccBB->begin(); - const PHINode *PN = dyn_cast(I); ++I) { + const auto *PN = dyn_cast(I); ++I) { // Ignore dead phi's. 
if (PN->use_empty()) continue; @@ -8273,7 +8273,7 @@ unsigned Reg; const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB); - if (const Constant *C = dyn_cast(PHIOp)) { + if (const auto *C = dyn_cast(PHIOp)) { unsigned &RegOut = ConstantsOut[C]; if (RegOut == 0) { RegOut = FuncInfo.CreateRegs(C->getType()); Index: lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp =================================================================== --- lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp +++ lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp @@ -398,7 +398,7 @@ } void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const { - if (const MachineSDNode *MN = dyn_cast(this)) { + if (const auto *MN = dyn_cast(this)) { if (!MN->memoperands_empty()) { OS << "<"; OS << "Mem:"; @@ -410,7 +410,7 @@ } OS << ">"; } - } else if (const ShuffleVectorSDNode *SVN = + } else if (const auto *SVN = dyn_cast(this)) { OS << "<"; for (unsigned i = 0, e = ValueList[0].getVectorNumElements(); i != e; ++i) { @@ -422,9 +422,9 @@ OS << Idx; } OS << ">"; - } else if (const ConstantSDNode *CSDN = dyn_cast(this)) { + } else if (const auto *CSDN = dyn_cast(this)) { OS << '<' << CSDN->getAPIntValue() << '>'; - } else if (const ConstantFPSDNode *CSDN = dyn_cast(this)) { + } else if (const auto *CSDN = dyn_cast(this)) { if (&CSDN->getValueAPF().getSemantics()==&APFloat::IEEEsingle) OS << '<' << CSDN->getValueAPF().convertToFloat() << '>'; else if (&CSDN->getValueAPF().getSemantics()==&APFloat::IEEEdouble) @@ -434,7 +434,7 @@ CSDN->getValueAPF().bitcastToAPInt().dump(); OS << ")>"; } - } else if (const GlobalAddressSDNode *GADN = + } else if (const auto *GADN = dyn_cast(this)) { int64_t offset = GADN->getOffset(); OS << '<'; @@ -446,13 +446,13 @@ OS << " " << offset; if (unsigned int TF = GADN->getTargetFlags()) OS << " [TF=" << TF << ']'; - } else if (const FrameIndexSDNode *FIDN = dyn_cast(this)) { + } else if (const auto *FIDN = dyn_cast(this)) { OS << "<" << FIDN->getIndex() << ">"; - } else if (const JumpTableSDNode *JTDN = dyn_cast(this)) { + } else if (const auto *JTDN = dyn_cast(this)) { OS << "<" << JTDN->getIndex() << ">"; if (unsigned int TF = JTDN->getTargetFlags()) OS << " [TF=" << TF << ']'; - } else if (const ConstantPoolSDNode *CP = dyn_cast(this)){ + } else if (const auto *CP = dyn_cast(this)){ int offset = CP->getOffset(); if (CP->isMachineConstantPoolEntry()) OS << "<" << *CP->getMachineCPVal() << ">"; @@ -464,38 +464,38 @@ OS << " " << offset; if (unsigned int TF = CP->getTargetFlags()) OS << " [TF=" << TF << ']'; - } else if (const TargetIndexSDNode *TI = dyn_cast(this)) { + } else if (const auto *TI = dyn_cast(this)) { OS << "<" << TI->getIndex() << '+' << TI->getOffset() << ">"; if (unsigned TF = TI->getTargetFlags()) OS << " [TF=" << TF << ']'; - } else if (const BasicBlockSDNode *BBDN = dyn_cast(this)) { + } else if (const auto *BBDN = dyn_cast(this)) { OS << "<"; const Value *LBB = (const Value*)BBDN->getBasicBlock()->getBasicBlock(); if (LBB) OS << LBB->getName() << " "; OS << (const void*)BBDN->getBasicBlock() << ">"; - } else if (const RegisterSDNode *R = dyn_cast(this)) { + } else if (const auto *R = dyn_cast(this)) { OS << ' ' << PrintReg(R->getReg(), G ? 
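// ---------------------------------------------------------------------------
// Every hunk in the file above makes the same transformation: when the
// initializer already names the target type inside cast<> or dyn_cast<>,
// repeating that type on the left-hand side is pure duplication, so
// 'auto *' can take its place without hiding anything from the reader.
// A minimal self-contained sketch of that idea, using plain C++
// dynamic_cast as a stand-in for LLVM's dyn_cast<> and hypothetical
// Node/LoadNode types (illustrative code, not part of the patch):

#include <iostream>

struct Node { virtual ~Node() = default; };
struct LoadNode : Node { int frameIndex() const { return 42; } };

void visit(Node *N) {
  // Before the cleanup this would read: LoadNode *LN = dynamic_cast<...>.
  // With 'auto *' the type is still spelled exactly once, in the cast,
  // and the declaration cannot drift out of sync with it.
  if (auto *LN = dynamic_cast<LoadNode *>(N))
    std::cout << "load, FI=" << LN->frameIndex() << '\n';
  else
    std::cout << "not a load\n";
}

int main() {
  LoadNode L;
  Node P;
  visit(&L); // prints: load, FI=42
  visit(&P); // prints: not a load
}
// ---------------------------------------------------------------------------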
Index: lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -398,7 +398,7 @@
 }
 void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
-  if (const MachineSDNode *MN = dyn_cast<MachineSDNode>(this)) {
+  if (const auto *MN = dyn_cast<MachineSDNode>(this)) {
     if (!MN->memoperands_empty()) {
       OS << "<";
       OS << "Mem:";
@@ -410,7 +410,7 @@
       }
       OS << ">";
     }
-  } else if (const ShuffleVectorSDNode *SVN =
+  } else if (const auto *SVN =
                dyn_cast<ShuffleVectorSDNode>(this)) {
     OS << "<";
     for (unsigned i = 0, e = ValueList[0].getVectorNumElements(); i != e; ++i) {
@@ -422,9 +422,9 @@
         OS << Idx;
     }
     OS << ">";
-  } else if (const ConstantSDNode *CSDN = dyn_cast<ConstantSDNode>(this)) {
+  } else if (const auto *CSDN = dyn_cast<ConstantSDNode>(this)) {
     OS << '<' << CSDN->getAPIntValue() << '>';
-  } else if (const ConstantFPSDNode *CSDN = dyn_cast<ConstantFPSDNode>(this)) {
+  } else if (const auto *CSDN = dyn_cast<ConstantFPSDNode>(this)) {
     if (&CSDN->getValueAPF().getSemantics()==&APFloat::IEEEsingle)
       OS << '<' << CSDN->getValueAPF().convertToFloat() << '>';
     else if (&CSDN->getValueAPF().getSemantics()==&APFloat::IEEEdouble)
@@ -434,7 +434,7 @@
       CSDN->getValueAPF().bitcastToAPInt().dump();
       OS << ")>";
     }
-  } else if (const GlobalAddressSDNode *GADN =
+  } else if (const auto *GADN =
                dyn_cast<GlobalAddressSDNode>(this)) {
     int64_t offset = GADN->getOffset();
     OS << '<';
@@ -446,13 +446,13 @@
       OS << " " << offset;
     if (unsigned int TF = GADN->getTargetFlags())
       OS << " [TF=" << TF << ']';
-  } else if (const FrameIndexSDNode *FIDN = dyn_cast<FrameIndexSDNode>(this)) {
+  } else if (const auto *FIDN = dyn_cast<FrameIndexSDNode>(this)) {
     OS << "<" << FIDN->getIndex() << ">";
-  } else if (const JumpTableSDNode *JTDN = dyn_cast<JumpTableSDNode>(this)) {
+  } else if (const auto *JTDN = dyn_cast<JumpTableSDNode>(this)) {
     OS << "<" << JTDN->getIndex() << ">";
     if (unsigned int TF = JTDN->getTargetFlags())
       OS << " [TF=" << TF << ']';
-  } else if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(this)){
+  } else if (const auto *CP = dyn_cast<ConstantPoolSDNode>(this)){
     int offset = CP->getOffset();
     if (CP->isMachineConstantPoolEntry())
       OS << "<" << *CP->getMachineCPVal() << ">";
@@ -464,38 +464,38 @@
       OS << " " << offset;
     if (unsigned int TF = CP->getTargetFlags())
       OS << " [TF=" << TF << ']';
-  } else if (const TargetIndexSDNode *TI = dyn_cast<TargetIndexSDNode>(this)) {
+  } else if (const auto *TI = dyn_cast<TargetIndexSDNode>(this)) {
     OS << "<" << TI->getIndex() << '+' << TI->getOffset() << ">";
     if (unsigned TF = TI->getTargetFlags())
       OS << " [TF=" << TF << ']';
-  } else if (const BasicBlockSDNode *BBDN = dyn_cast<BasicBlockSDNode>(this)) {
+  } else if (const auto *BBDN = dyn_cast<BasicBlockSDNode>(this)) {
     OS << "<";
     const Value *LBB = (const Value*)BBDN->getBasicBlock()->getBasicBlock();
     if (LBB)
       OS << LBB->getName() << " ";
     OS << (const void*)BBDN->getBasicBlock() << ">";
-  } else if (const RegisterSDNode *R = dyn_cast<RegisterSDNode>(this)) {
+  } else if (const auto *R = dyn_cast<RegisterSDNode>(this)) {
     OS << ' ' << PrintReg(R->getReg(),
                           G ? G->getSubtarget().getRegisterInfo() : nullptr);
-  } else if (const ExternalSymbolSDNode *ES =
+  } else if (const auto *ES =
                dyn_cast<ExternalSymbolSDNode>(this)) {
     OS << "'" << ES->getSymbol() << "'";
     if (unsigned int TF = ES->getTargetFlags())
       OS << " [TF=" << TF << ']';
-  } else if (const SrcValueSDNode *M = dyn_cast<SrcValueSDNode>(this)) {
+  } else if (const auto *M = dyn_cast<SrcValueSDNode>(this)) {
     if (M->getValue())
       OS << "<" << M->getValue() << ">";
     else
       OS << "<null>";
-  } else if (const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(this)) {
+  } else if (const auto *MD = dyn_cast<MDNodeSDNode>(this)) {
     if (MD->getMD())
       OS << "<" << MD->getMD() << ">";
     else
       OS << "<null>";
-  } else if (const VTSDNode *N = dyn_cast<VTSDNode>(this)) {
+  } else if (const auto *N = dyn_cast<VTSDNode>(this)) {
     OS << ":" << N->getVT().getEVTString();
   }
-  else if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(this)) {
+  else if (const auto *LD = dyn_cast<LoadSDNode>(this)) {
     OS << "<" << *LD->getMemOperand();
     bool doExt = true;
@@ -513,7 +513,7 @@
       OS << ", " << AM;
     OS << ">";
-  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(this)) {
+  } else if (const auto *ST = dyn_cast<StoreSDNode>(this)) {
     OS << "<" << *ST->getMemOperand();
     if (ST->isTruncatingStore())
@@ -524,9 +524,9 @@
       OS << ", " << AM;
     OS << ">";
-  } else if (const MemSDNode* M = dyn_cast<MemSDNode>(this)) {
+  } else if (const auto * M = dyn_cast<MemSDNode>(this)) {
     OS << "<" << *M->getMemOperand() << ">";
-  } else if (const BlockAddressSDNode *BA =
+  } else if (const auto *BA =
                dyn_cast<BlockAddressSDNode>(this)) {
     int64_t offset = BA->getOffset();
     OS << "<";
@@ -540,7 +540,7 @@
       OS << " " << offset;
     if (unsigned int TF = BA->getTargetFlags())
       OS << " [TF=" << TF << ']';
-  } else if (const AddrSpaceCastSDNode *ASC =
+  } else if (const auto *ASC =
                dyn_cast<AddrSpaceCastSDNode>(this)) {
     OS << '['
        << ASC->getSrcAddressSpace()
Index: lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -396,7 +396,7 @@
 static void SplitCriticalSideEffectEdges(Function &Fn) {
   // Loop for blocks with phi nodes.
   for (BasicBlock &BB : Fn) {
-    PHINode *PN = dyn_cast<PHINode>(BB.begin());
+    auto *PN = dyn_cast<PHINode>(BB.begin());
     if (!PN) continue;
   ReprocessBlock:
@@ -406,7 +406,7 @@
     // PHI.
     for (BasicBlock::iterator I = BB.begin(); (PN = dyn_cast<PHINode>(I)); ++I)
       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
-        ConstantExpr *CE = dyn_cast<ConstantExpr>(PN->getIncomingValue(i));
+        auto *CE = dyn_cast<ConstantExpr>(PN->getIncomingValue(i));
         if (!CE || !CE->canTrap()) continue;
         // The only case we have to worry about is when the edge is critical.
@@ -973,7 +973,7 @@
 static bool hasExceptionPointerOrCodeUser(const CatchPadInst *CPI) {
   for (const User *U : CPI->users()) {
-    if (const IntrinsicInst *EHPtrCall = dyn_cast<IntrinsicInst>(U)) {
+    if (const auto *EHPtrCall = dyn_cast<IntrinsicInst>(U)) {
       Intrinsic::ID IID = EHPtrCall->getIntrinsicID();
       if (IID == Intrinsic::eh_exceptionpointer ||
           IID == Intrinsic::eh_exceptioncode)
@@ -1183,7 +1183,7 @@
   for (const auto &LLVMBB : Fn)
     for (const auto &Inst : LLVMBB) {
-      if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(&Inst))
+      if (const auto *Alloca = dyn_cast<AllocaInst>(&Inst))
        if (Alloca->isSwiftError())
          FuncInfo->SwiftErrorVals.push_back(Alloca);
    }
@@ -1365,11 +1365,11 @@
   if (AllPredsVisited) {
     for (BasicBlock::const_iterator I = LLVMBB->begin();
-         const PHINode *PN = dyn_cast<PHINode>(I); ++I)
+         const auto *PN = dyn_cast<PHINode>(I); ++I)
       FuncInfo->ComputePHILiveOutRegInfo(PN);
   } else {
     for (BasicBlock::const_iterator I = LLVMBB->begin();
-         const PHINode *PN = dyn_cast<PHINode>(I); ++I)
+         const auto *PN = dyn_cast<PHINode>(I); ++I)
       FuncInfo->InvalidatePHILiveOutRegInfo(PN);
   }
@@ -2197,8 +2197,8 @@
 void SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) {
   SDLoc dl(Op);
-  MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1));
-  const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
+  auto *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1));
+  const auto *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
   unsigned Reg =
       TLI->getRegisterByName(RegStr->getString().data(), Op->getValueType(0),
                              *CurDAG);
@@ -2211,8 +2211,8 @@
 void SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) {
   SDLoc dl(Op);
-  MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1));
-  const MDString *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
+  auto *MD = dyn_cast<MDNodeSDNode>(Op->getOperand(1));
+  const auto *RegStr = dyn_cast<MDString>(MD->getMD()->getOperand(0));
   unsigned Reg = TLI->getRegisterByName(RegStr->getString().data(),
                                         Op->getOperand(2).getValueType(),
                                         *CurDAG);
@@ -2630,7 +2630,7 @@
   if (Val & 128)
     Val = GetVBR(Val, MatcherTable, MatcherIndex);
-  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
+  auto *C = dyn_cast<ConstantSDNode>(N);
   return C && C->getSExtValue() == Val;
 }
@@ -2651,7 +2651,7 @@
   if (N->getOpcode() != ISD::AND)
     return false;
-  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
   return C && SDISel.CheckAndMask(N.getOperand(0), C, Val);
 }
@@ -2664,7 +2664,7 @@
   if (N->getOpcode() != ISD::OR)
     return false;
-  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
   return C && SDISel.CheckOrMask(N.getOperand(0), C, Val);
 }
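// ---------------------------------------------------------------------------
// Several hunks above use the cast result itself as a loop condition, e.g.
// 'for (...; const auto *PN = dyn_cast<PHINode>(I); ++I)'.  A condition may
// be a declaration in C++, so the loop variable, the downcast, and the
// termination test collapse into one line.  Sketch under the same stand-in
// types as before (illustrative only; dynamic_cast plays dyn_cast<>'s role):

#include <iostream>
#include <memory>
#include <vector>

struct Inst { virtual ~Inst() = default; };
struct Phi : Inst { int id; explicit Phi(int n) : id(n) {} };
struct Add : Inst {};

int main() {
  std::vector<std::unique_ptr<Inst>> Block;
  Block.push_back(std::make_unique<Phi>(0));
  Block.push_back(std::make_unique<Phi>(1));
  // A non-phi ends the run, mirroring how the phi prefix of an LLVM basic
  // block is always terminated before the block ends.
  Block.push_back(std::make_unique<Add>());

  for (auto I = Block.begin();
       const auto *PN = dynamic_cast<const Phi *>(I->get()); ++I)
    std::cout << "phi " << PN->id << '\n'; // prints: phi 0, phi 1
}
// ---------------------------------------------------------------------------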
Index: lib/CodeGen/SelectionDAG/StatepointLowering.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -134,13 +134,13 @@
   }
   // Look through bitcast instructions.
-  if (const BitCastInst *Cast = dyn_cast<BitCastInst>(Val))
+  if (const auto *Cast = dyn_cast<BitCastInst>(Val))
     return findPreviousSpillSlot(Cast->getOperand(0), Builder, LookUpDepth - 1);
   // Look through phi nodes
   // All incoming values should have same known stack slot, otherwise result
   // is unknown.
-  if (const PHINode *Phi = dyn_cast<PHINode>(Val)) {
+  if (const auto *Phi = dyn_cast<PHINode>(Val)) {
     Optional<int> MergedResult = None;
     for (auto &IncomingValue : Phi->incoming_values()) {
@@ -374,7 +374,7 @@
                              SelectionDAGBuilder &Builder) {
   SDValue Chain = Builder.getRoot();
-  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Incoming)) {
+  if (auto *C = dyn_cast<ConstantSDNode>(Incoming)) {
     // If the original value was a constant, make sure it gets recorded as
     // such in the stackmap.  This is required so that the consumer can
     // parse any internal format to the deopt state.  It also handles null
@@ -382,7 +382,7 @@
     // vectors do not appear to actually hit this path and that anything larger
     // than an i64 value (not type!) will fail asserts here.
     pushStackMapConstant(Ops, Builder, C->getSExtValue());
-  } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Incoming)) {
+  } else if (auto *FI = dyn_cast<FrameIndexSDNode>(Incoming)) {
     // This handles allocas as arguments to the statepoint (this is only
     // really meaningful for a deopt value.  For GC, we'd be trying to
     // relocate the address of the alloca itself?)
@@ -520,7 +520,7 @@
   // the alloca
   for (Value *V : SI.GCArgs) {
     SDValue Incoming = Builder.getValue(V);
-    if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Incoming)) {
+    if (auto *FI = dyn_cast<FrameIndexSDNode>(Incoming)) {
       // This handles allocas as arguments to the statepoint
       Ops.push_back(Builder.DAG.getTargetFrameIndex(FI->getIndex(),
                                                     Incoming.getValueType()));
@@ -902,7 +902,7 @@
   // register because statepoint and actual call return types can be
   // different, and getValue() will use CopyFromReg of the wrong type,
   // which is always i32 in our case.
-  PointerType *CalleeType = cast<PointerType>(
+  auto *CalleeType = cast<PointerType>(
       ImmutableStatepoint(I).getCalledValue()->getType());
   Type *RetTy =
       cast<FunctionType>(CalleeType->getElementType())->getReturnType();
Index: lib/CodeGen/SelectionDAG/TargetLowering.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -347,7 +347,7 @@
   case ISD::XOR:
   case ISD::AND:
   case ISD::OR: {
-    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+    auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
     if (!C) return false;
     if (Op.getOpcode() == ISD::XOR &&
@@ -553,7 +553,7 @@
     // using the bits from the RHS.  Below, we use knowledge about the RHS to
     // simplify the LHS, here we're using information from the LHS to simplify
     // the RHS.
-    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
+    if (auto *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
       SDValue Op0 = Op.getOperand(0);
       APInt LHSZero, LHSOne;
       // Do not increment Depth here; that can cause an infinite loop.
@@ -694,7 +694,7 @@
     // If the RHS is a constant, see if we can simplify it.
     // for XOR, we prefer to force bits to 1 if they will make a -1.
     // If we can't force bits, try to shrink the constant.
-    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
+    if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
       APInt Expanded = C->getAPIntValue() | (~NewMask);
       // If we can expand it to have all bits set, do it.
       if (Expanded.isAllOnesValue()) {
@@ -751,7 +751,7 @@
     KnownZero &= KnownZero2;
     break;
   case ISD::SHL:
-    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
+    if (auto *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
       unsigned ShAmt = SA->getZExtValue();
       SDValue InOp = Op.getOperand(0);
@@ -838,7 +838,7 @@
     }
     break;
   case ISD::SRL:
-    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
+    if (auto *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
       EVT VT = Op.getValueType();
       unsigned ShAmt = SA->getZExtValue();
       unsigned VTSize = VT.getSizeInBits();
@@ -898,7 +898,7 @@
                       TLO.DAG.getNode(ISD::SRL, dl, Op.getValueType(),
                                       Op.getOperand(0), Op.getOperand(1)));
-    if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
+    if (auto *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
       EVT VT = Op.getValueType();
       unsigned ShAmt = SA->getZExtValue();
@@ -1148,7 +1148,7 @@
         // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is
        // undesirable.
        break;
-      ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1));
+      auto *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1));
      if (!ShAmt)
        break;
      SDValue Shift = In.getOperand(1);
@@ -1265,7 +1265,7 @@
  for (SDNodeIterator I = SDNodeIterator::begin(N),
       E = SDNodeIterator::end(N); I != E; ++I) {
    SDNode *Op = *I;
-    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op))
+    if (auto *C = dyn_cast<ConstantSDNode>(Op))
      if (C->isOpaque())
        return false;
  }
@@ -1310,9 +1310,9 @@
  if (!N)
    return false;
-  const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
+  const auto *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) {
-    const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
+    const auto *BV = dyn_cast<BuildVectorSDNode>(N);
    if (!BV)
      return false;
@@ -1350,9 +1350,9 @@
  if (!N)
    return false;
-  const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
+  const auto *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) {
-    const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N);
+    const auto *BV = dyn_cast<BuildVectorSDNode>(N);
    if (!BV)
      return false;
@@ -1632,7 +1632,7 @@
        isa<LoadSDNode>(N0.getOperand(0)) &&
        N0.getOperand(0).getNode()->hasOneUse() &&
        isa<ConstantSDNode>(N0.getOperand(1))) {
-      LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0));
+      auto *Lod = cast<LoadSDNode>(N0.getOperand(0));
      APInt bestMask;
      unsigned bestWidth = 0, bestOffset = 0;
      if (!Lod->isVolatile() && Lod->isUnindexed()) {
@@ -2410,8 +2410,8 @@
    // These operands are interested in values of the form (GV+C), where C may
    // be folded in as an offset of GV, or it may be explicitly added.  Also, it
    // is possible and fine if either GV or C are missing.
-    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
-    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
+    auto *C = dyn_cast<ConstantSDNode>(Op);
+    auto *GA = dyn_cast<GlobalAddressSDNode>(Op);
    // If we have "(add GV, C)", pull out GV/C
    if (Op.getOpcode() == ISD::ADD) {
@@ -2527,7 +2527,7 @@
                                 ImmutableCallSite CS) const {
  /// Information about all of the constraints.
  AsmOperandInfoVector ConstraintOperands;
-  const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
+  const auto *IA = cast<InlineAsm>(CS.getCalledValue());
  unsigned maCount = 0; // Largest number of multiple alternative constraints.
  // Do a prepass over the constraints, canonicalizing them, and building up the
@@ -2558,7 +2558,7 @@
      // corresponding argument.
      assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
-      if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
+      if (auto *STy = dyn_cast<StructType>(CS.getType())) {
        OpInfo.ConstraintVT =
            getSimpleValueType(DL, STy->getElementType(ResNo));
      } else {
@@ -2578,14 +2578,14 @@
    if (OpInfo.CallOperandVal) {
      llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
      if (OpInfo.isIndirect) {
-        llvm::PointerType *PtrTy = dyn_cast<llvm::PointerType>(OpTy);
+        auto *PtrTy = dyn_cast<llvm::PointerType>(OpTy);
        if (!PtrTy)
          report_fatal_error("Indirect operand for inline asm not a pointer!");
        OpTy = PtrTy->getElementType();
      }
      // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
-      if (StructType *STy = dyn_cast<StructType>(OpTy))
+      if (auto *STy = dyn_cast<StructType>(OpTy))
        if (STy->getNumElements() == 1)
          OpTy = STy->getElementType(0);
@@ -2605,7 +2605,7 @@
              MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true);
          break;
        }
-      } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) {
+      } else if (auto *PT = dyn_cast<PointerType>(OpTy)) {
        unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace());
        OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize);
      } else {
@@ -3786,7 +3786,7 @@
    return SDValue();
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc dl(Op);
-  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
+  if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
    if (C->isNullValue() && CC == ISD::SETEQ) {
      EVT VT = Op.getOperand(0).getValueType();
      SDValue Zext = Op.getOperand(0);
Index: lib/CodeGen/ShadowStackGCLowering.cpp
===================================================================
--- lib/CodeGen/ShadowStackGCLowering.cpp
+++ lib/CodeGen/ShadowStackGCLowering.cpp
@@ -90,7 +90,7 @@
    unsigned NumMeta = 0;
    SmallVector<Constant *, 16> Metadata;
    for (unsigned I = 0; I != Roots.size(); ++I) {
-      Constant *C = cast<Constant>(Roots[I].first->getArgOperand(1));
+      auto *C = cast<Constant>(Roots[I].first->getArgOperand(1));
      if (!C->isNullValue())
        NumMeta = I + 1;
      Metadata.push_back(ConstantExpr::getBitCast(C, VoidPtr));
@@ -203,7 +203,7 @@
 }
 bool ShadowStackGCLowering::IsNullValue(Value *V) {
-  if (Constant *C = dyn_cast<Constant>(V))
+  if (auto *C = dyn_cast<Constant>(V))
    return C->isNullValue();
  return false;
 }
@@ -219,7 +219,7 @@
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;)
-      if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++))
+      if (auto *CI = dyn_cast<IntrinsicInst>(II++))
        if (Function *F = CI->getCalledFunction())
          if (F->getIntrinsicID() == Intrinsic::gcroot) {
            std::pair<CallInst *, AllocaInst *> Pair = std::make_pair(
Index: lib/CodeGen/SjLjEHPrepare.cpp
===================================================================
--- lib/CodeGen/SjLjEHPrepare.cpp
+++ lib/CodeGen/SjLjEHPrepare.cpp
@@ -269,7 +269,7 @@
    // Avoid iterator invalidation by copying users to a temporary vector.
    SmallVector<Instruction *, 16> Users;
    for (User *U : Inst.users()) {
-      Instruction *UI = cast<Instruction>(U);
+      auto *UI = cast<Instruction>(U);
      if (UI->getParent() != &BB || isa<LandingPadInst>(UI))
        Users.push_back(UI);
    }
@@ -284,7 +284,7 @@
        MarkBlocksLiveIn(U->getParent(), LiveBBs);
      } else {
        // Uses for a PHI node occur in their predecessor block.
-        PHINode *PN = cast<PHINode>(U);
+        auto *PN = cast<PHINode>(U);
        for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
          if (PN->getIncomingValue(i) == &Inst)
            MarkBlocksLiveIn(PN->getIncomingBlock(i), LiveBBs);
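// ---------------------------------------------------------------------------
// The patch consistently writes 'const auto *' and 'auto *' rather than a
// bare 'auto', even though 'auto' alone would deduce the same pointer type.
// Keeping the '*' and the 'const' in the declaration preserves the
// information a reader actually needs at a glance.  Self-contained sketch
// (hypothetical Value type, not LLVM's):

#include <iostream>

struct Value { int id = 7; };

const Value *lookup() {
  static const Value V;
  return &V;
}

int main() {
  auto A = lookup();        // deduces const Value *, but doesn't say so
  auto *B = lookup();       // pointer-ness explicit
  const auto *C = lookup(); // pointer-ness and constness explicit

  std::cout << A->id << B->id << C->id << '\n'; // prints: 777
  // C->id = 1; // would not compile: the pointee stays const
}
// ---------------------------------------------------------------------------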
Index: lib/CodeGen/StackColoring.cpp
===================================================================
--- lib/CodeGen/StackColoring.cpp
+++ lib/CodeGen/StackColoring.cpp
@@ -822,7 +822,7 @@
    if (FromAI->isUsedByMetadata())
      ValueAsMetadata::handleRAUW(FromAI, UndefValue::get(FromAI->getType()));
    for (auto &Use : FromAI->uses()) {
-      if (BitCastInst *BCI = dyn_cast<BitCastInst>(Use.get()))
+      if (auto *BCI = dyn_cast<BitCastInst>(Use.get()))
        if (BCI->isUsedByMetadata())
          ValueAsMetadata::handleRAUW(BCI, UndefValue::get(BCI->getType()));
    }
@@ -852,7 +852,7 @@
      // We've replaced IR-level uses of the remapped allocas, so we only
      // need to replace direct uses here.
-      const AllocaInst *AI = dyn_cast_or_null<AllocaInst>(MMO->getValue());
+      const auto *AI = dyn_cast_or_null<AllocaInst>(MMO->getValue());
      if (!AI)
        continue;
Index: lib/CodeGen/StackProtector.cpp
===================================================================
--- lib/CodeGen/StackProtector.cpp
+++ lib/CodeGen/StackProtector.cpp
@@ -121,7 +121,7 @@
                                              bool InStruct) const {
  if (!Ty)
    return false;
-  if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
+  if (auto *AT = dyn_cast<ArrayType>(Ty)) {
    if (!AT->getElementType()->isIntegerTy(8)) {
      // If we're on a non-Darwin platform or we're inside of a structure, don't
      // add stack protectors unless the array is a character array.
@@ -143,7 +143,7 @@
    return true;
  }
-  const StructType *ST = dyn_cast<StructType>(Ty);
+  const auto *ST = dyn_cast<StructType>(Ty);
  if (!ST)
    return false;
@@ -165,29 +165,29 @@
 bool StackProtector::HasAddressTaken(const Instruction *AI) {
  for (const User *U : AI->users()) {
-    if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
+    if (const auto *SI = dyn_cast<StoreInst>(U)) {
      if (AI == SI->getValueOperand())
        return true;
-    } else if (const PtrToIntInst *SI = dyn_cast<PtrToIntInst>(U)) {
+    } else if (const auto *SI = dyn_cast<PtrToIntInst>(U)) {
      if (AI == SI->getOperand(0))
        return true;
    } else if (isa<CallInst>(U)) {
      return true;
    } else if (isa<InvokeInst>(U)) {
      return true;
-    } else if (const SelectInst *SI = dyn_cast<SelectInst>(U)) {
+    } else if (const auto *SI = dyn_cast<SelectInst>(U)) {
      if (HasAddressTaken(SI))
        return true;
-    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
+    } else if (const auto *PN = dyn_cast<PHINode>(U)) {
      // Keep track of what PHI nodes we have already visited to ensure
      // they are only visited once.
      if (VisitedPHIs.insert(PN).second)
        if (HasAddressTaken(PN))
          return true;
-    } else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
+    } else if (const auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
      if (HasAddressTaken(GEP))
        return true;
-    } else if (const BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
+    } else if (const auto *BI = dyn_cast<BitCastInst>(U)) {
      if (HasAddressTaken(BI))
        return true;
    }
@@ -213,7 +213,7 @@
  bool NeedsProtector = false;
  for (const BasicBlock &BB : *F)
    for (const Instruction &I : BB)
-      if (const CallInst *CI = dyn_cast<CallInst>(&I))
+      if (const auto *CI = dyn_cast<CallInst>(&I))
        if (CI->getCalledFunction() ==
            Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::stackprotector))
@@ -234,7 +234,7 @@
  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
-      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
+      if (const auto *AI = dyn_cast<AllocaInst>(&I)) {
        if (AI->isArrayAllocation()) {
          if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
            if (CI->getLimitedValue(SSPBufferSize) >= SSPBufferSize) {
@@ -336,7 +336,7 @@
  for (Function::iterator I = F->begin(), E = F->end(); I != E;) {
    BasicBlock *BB = &*I++;
-    ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator());
+    auto *RI = dyn_cast<ReturnInst>(BB->getTerminator());
    if (!RI)
      continue;
@@ -365,7 +365,7 @@
      IRBuilder<> B(RI);
      LoadInst *Guard = B.CreateLoad(AI, true, "Guard");
      CallInst *Call = B.CreateCall(GuardCheck, {Guard});
-      llvm::Function *Function = cast<llvm::Function>(GuardCheck);
+      auto *Function = cast<llvm::Function>(GuardCheck);
      Call->setAttributes(Function->getAttributes());
      Call->setCallingConv(Function->getCallingConv());
    } else {
Index: lib/CodeGen/StackSlotColoring.cpp
===================================================================
--- lib/CodeGen/StackSlotColoring.cpp
+++ lib/CodeGen/StackSlotColoring.cpp
@@ -163,7 +163,7 @@
           EE = MI.memoperands_end(); MMOI != EE; ++MMOI) {
      MachineMemOperand *MMO = *MMOI;
-      if (const FixedStackPseudoSourceValue *FSV =
+      if (const auto *FSV =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              MMO->getPseudoValue())) {
        int FI = FSV->getFrameIndex();
Index: lib/CodeGen/TargetInstrInfo.cpp
===================================================================
--- lib/CodeGen/TargetInstrInfo.cpp
+++ lib/CodeGen/TargetInstrInfo.cpp
@@ -310,7 +310,7 @@
         oe = MI.memoperands_end(); o != oe; ++o) {
    if ((*o)->isLoad()) {
-      if (const FixedStackPseudoSourceValue *Value =
+      if (const auto *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
@@ -329,7 +329,7 @@
         oe = MI.memoperands_end(); o != oe; ++o) {
    if ((*o)->isStore()) {
-      if (const FixedStackPseudoSourceValue *Value =
+      if (const auto *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
Index: lib/CodeGen/TargetLoweringObjectFileImpl.cpp
===================================================================
--- lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -65,7 +65,7 @@
    MCStreamer &Streamer, const DataLayout &DL, const MCSymbol *Sym) const {
  SmallString<64> NameData("DW.ref.");
  NameData += Sym->getName();
-  MCSymbolELF *Label =
+  auto *Label =
      cast<MCSymbolELF>(getContext().getOrCreateSymbol(NameData));
  Streamer.EmitSymbolAttribute(Label, MCSA_Hidden);
  Streamer.EmitSymbolAttribute(Label, MCSA_Weak);
@@ -810,7 +810,7 @@
    return true;
  // If it is not dead stripped, it is safe to use private labels.
-  const MCSectionMachO &SMO = cast<MCSectionMachO>(Section);
+  const auto &SMO = cast<MCSectionMachO>(Section);
  if (SMO.hasAttribute(MachO::S_ATTR_NO_DEAD_STRIP))
    return true;
Index: lib/CodeGen/UnreachableBlockElim.cpp
===================================================================
--- lib/CodeGen/UnreachableBlockElim.cpp
+++ lib/CodeGen/UnreachableBlockElim.cpp
@@ -53,7 +53,7 @@
    if (!Reachable.count(&*I)) {
      BasicBlock *BB = &*I;
      DeadBlocks.push_back(BB);
-      while (PHINode *PN = dyn_cast<PHINode>(BB->begin())) {
+      while (auto *PN = dyn_cast<PHINode>(BB->begin())) {
        PN->replaceAllUsesWith(Constant::getNullValue(PN->getType()));
        BB->getInstList().pop_front();
      }
Index: lib/CodeGen/WinEHPrepare.cpp
===================================================================
--- lib/CodeGen/WinEHPrepare.cpp
+++ lib/CodeGen/WinEHPrepare.cpp
@@ -137,7 +137,7 @@
  assert(TBME.TryLow <= TBME.TryHigh);
  for (const CatchPadInst *CPI : Handlers) {
    WinEHHandlerType HT;
-    Constant *TypeInfo = cast<Constant>(CPI->getArgOperand(0));
+    auto *TypeInfo = cast<Constant>(CPI->getArgOperand(0));
    if (TypeInfo->isNullValue())
      HT.TypeDescriptor = nullptr;
    else
@@ -342,9 +342,9 @@
    const auto *CatchPad =
        cast<CatchPadInst>((*CatchSwitch->handler_begin())->getFirstNonPHI());
    const BasicBlock *CatchPadBB = CatchPad->getParent();
-    const Constant *FilterOrNull =
+    const auto *FilterOrNull =
        cast<Constant>(CatchPad->getArgOperand(0)->stripPointerCasts());
-    const Function *Filter = dyn_cast<Function>(FilterOrNull);
+    const auto *Filter = dyn_cast<Function>(FilterOrNull);
    assert((Filter || FilterOrNull->isNullValue()) &&
           "unexpected filter value");
    int TryState = addSEHExcept(FuncInfo, ParentState, Filter, CatchPadBB);
@@ -897,7 +897,7 @@
      // Scan all uses of this instruction to see if it is used outside of its
      // funclet, and if so, record them in UsesToRename.
      for (Use &U : OldI->uses()) {
-        Instruction *UserI = cast<Instruction>(U.getUser());
+        auto *UserI = cast<Instruction>(U.getUser());
        BasicBlock *UserBB = UserI->getParent();
        ColorVector &ColorsForUserBB = BlockColors[UserBB];
        assert(!ColorsForUserBB.empty());
@@ -1113,7 +1113,7 @@
    Value *InVal;
    std::tie(EHBlock, InVal) = Worklist.pop_back_val();
-    PHINode *PN = dyn_cast<PHINode>(InVal);
+    auto *PN = dyn_cast<PHINode>(InVal);
    if (PN && PN->getParent() == EHBlock) {
      // The value is defined by another PHI we need to remove, with no room to
      // insert a store after the PHI, so each predecessor needs to store its
@@ -1194,7 +1194,7 @@
    //   br label %PHIBlock
    // So move the terminators to each others' blocks and swap their
    // successors.
-    BranchInst *Goto = cast<BranchInst>(IncomingBlock->getTerminator());
+    auto *Goto = cast<BranchInst>(IncomingBlock->getTerminator());
    Goto->removeFromParent();
    CatchRet->removeFromParent();
    IncomingBlock->getInstList().push_back(CatchRet);
Index: lib/DebugInfo/DWARF/DWARFDebugFrame.cpp
===================================================================
--- lib/DebugInfo/DWARF/DWARFDebugFrame.cpp
+++ lib/DebugInfo/DWARF/DWARFDebugFrame.cpp
@@ -444,7 +444,7 @@
 void FrameEntry::dumpInstructions(raw_ostream &OS) const {
  uint64_t CodeAlignmentFactor = 0;
  int64_t DataAlignmentFactor = 0;
-  const CIE *Cie = dyn_cast<CIE>(this);
+  const auto *Cie = dyn_cast<CIE>(this);
  if (!Cie)
    Cie = cast<FDE>(this)->getLinkedCIE();
Index: lib/DebugInfo/Symbolize/Symbolize.cpp
===================================================================
--- lib/DebugInfo/Symbolize/Symbolize.cpp
+++ lib/DebugInfo/Symbolize/Symbolize.cpp
@@ -259,7 +259,7 @@
    ObjectFile *DbgObj = DbgObjOrErr.get();
    if (!DbgObj)
      continue;
-    const MachOObjectFile *MachDbgObj = dyn_cast<const MachOObjectFile>(DbgObj);
+    const auto *MachDbgObj = dyn_cast<const MachOObjectFile>(DbgObj);
    if (!MachDbgObj)
      continue;
    if (darwinDsymMatchesBinary(MachDbgObj, MachExeObj))
@@ -338,7 +338,7 @@
  if (!Bin)
    return static_cast<ObjectFile *>(nullptr);
-  if (MachOUniversalBinary *UB = dyn_cast_or_null<MachOUniversalBinary>(Bin)) {
+  if (auto *UB = dyn_cast_or_null<MachOUniversalBinary>(Bin)) {
    const auto &I = ObjectForUBPathAndArch.find(std::make_pair(Path, ArchName));
    if (I != ObjectForUBPathAndArch.end()) {
      return I->second.get();
Index: lib/ExecutionEngine/ExecutionEngine.cpp
===================================================================
--- lib/ExecutionEngine/ExecutionEngine.cpp
+++ lib/ExecutionEngine/ExecutionEngine.cpp
@@ -381,11 +381,11 @@
  // Should be an array of '{ i32, void ()* }' structs.  The first value is
  // the init priority, which we ignore.
-  ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
+  auto *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
  if (!InitList)
    return;
  for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
-    ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(i));
+    auto *CS = dyn_cast<ConstantStruct>(InitList->getOperand(i));
    if (!CS) continue;
    Constant *FP = CS->getOperand(1);
@@ -393,12 +393,12 @@
      continue;  // Found a sentinal value, ignore.
    // Strip off constant expression casts.
-    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(FP))
+    if (auto *CE = dyn_cast<ConstantExpr>(FP))
      if (CE->isCast())
        FP = CE->getOperand(0);
    // Execute the ctor/dtor function!
-    if (Function *F = dyn_cast<Function>(FP))
+    if (auto *F = dyn_cast<Function>(FP))
      runFunction(F, None);
    // FIXME: It is marginally lame that we just do nothing here if we see an
@@ -610,7 +610,7 @@
    break;
  case Type::StructTyID: {
    // if the whole struct is 'undef' just reserve memory for the value.
-    if(StructType *STy = dyn_cast<StructType>(C->getType())) {
+    if(auto *STy = dyn_cast<StructType>(C->getType())) {
      unsigned int elemNum = STy->getNumElements();
      Result.AggregateVal.resize(elemNum);
      for (unsigned int i = 0; i < elemNum; ++i) {
@@ -642,7 +642,7 @@
  }
  // Otherwise, if the value is a ConstantExpr...
-  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+  if (const auto *CE = dyn_cast<ConstantExpr>(C)) {
    Constant *Op0 = CE->getOperand(0);
    switch (CE->getOpcode()) {
    case Instruction::GetElementPtr: {
@@ -906,9 +906,9 @@
  case Type::PointerTyID:
    if (isa<ConstantPointerNull>(C))
      Result.PointerVal = nullptr;
-    else if (const Function *F = dyn_cast<Function>(C))
+    else if (const auto *F = dyn_cast<Function>(C))
      Result = PTOGV(getPointerToFunctionOrStub(const_cast<Function*>(F)));
-    else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
+    else if (const auto *GV = dyn_cast<GlobalVariable>(C))
      Result = PTOGV(getOrEmitGlobalVariable(const_cast<GlobalVariable*>(GV)));
    else
      llvm_unreachable("Unknown constant pointer type!");
@@ -916,15 +916,15 @@
  case Type::VectorTyID: {
    unsigned elemNum;
    Type* ElemTy;
-    const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
-    const ConstantVector *CV = dyn_cast<ConstantVector>(C);
-    const ConstantAggregateZero *CAZ = dyn_cast<ConstantAggregateZero>(C);
+    const auto *CDV = dyn_cast<ConstantDataVector>(C);
+    const auto *CV = dyn_cast<ConstantVector>(C);
+    const auto *CAZ = dyn_cast<ConstantAggregateZero>(C);
    if (CDV) {
        elemNum = CDV->getNumElements();
        ElemTy = CDV->getElementType();
    } else if (CV || CAZ) {
-        VectorType* VTy = dyn_cast<VectorType>(C->getType());
+        auto * VTy = dyn_cast<VectorType>(C->getType());
        elemNum = VTy->getNumElements();
        ElemTy = VTy->getElementType();
    } else {
@@ -1187,7 +1187,7 @@
  if (isa<UndefValue>(Init))
    return;
-  if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) {
+  if (const auto *CP = dyn_cast<ConstantVector>(Init)) {
    unsigned ElementSize =
        getDataLayout().getTypeAllocSize(CP->getType()->getElementType());
    for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
@@ -1200,7 +1200,7 @@
    return;
  }
-  if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) {
+  if (const auto *CPA = dyn_cast<ConstantArray>(Init)) {
    unsigned ElementSize =
        getDataLayout().getTypeAllocSize(CPA->getType()->getElementType());
    for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
@@ -1208,7 +1208,7 @@
    return;
  }
-  if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(Init)) {
+  if (const auto *CPS = dyn_cast<ConstantStruct>(Init)) {
    const StructLayout *SL =
        getDataLayout().getStructLayout(cast<StructType>(CPS->getType()));
    for (unsigned i = 0, e = CPS->getNumOperands(); i != e; ++i)
@@ -1216,7 +1216,7 @@
    return;
  }
-  if (const ConstantDataSequential *CDS =
+  if (const auto *CDS =
      dyn_cast<ConstantDataSequential>(Init)) {
    // CDS is already laid out in host memory order.
    StringRef Data = CDS->getRawDataValues();
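// ---------------------------------------------------------------------------
// The same motivation extends beyond casts: once a type such as
// 'DenseMap<const AllocaInst*, int>::iterator' is pinned down by the
// right-hand side (here, by a find() call), repeating it on the left only
// adds noise.  Standard-library sketch of the idiom:

#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  std::map<std::string, std::vector<int>> Table{{"a", {1, 2}}, {"b", {3}}};

  // Without auto the iterator type must be spelled in full:
  //   std::map<std::string, std::vector<int>>::iterator It = Table.find("a");
  auto It = Table.find("a"); // identical type, spelled once

  if (It != Table.end())
    std::cout << It->first << " -> " << It->second.size() << " elems\n";
}
// ---------------------------------------------------------------------------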
Index: lib/ExecutionEngine/Interpreter/Execution.cpp
===================================================================
--- lib/ExecutionEngine/Interpreter/Execution.cpp
+++ lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -853,7 +853,7 @@
    // Save result...
    if (!CallingSF.Caller.getType()->isVoidTy())
      SetValue(I, Result, CallingSF);
-    if (InvokeInst *II = dyn_cast<InvokeInst> (I))
+    if (auto *II = dyn_cast<InvokeInst> (I))
      SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
    CallingSF.Caller = CallSite();          // We returned from the call...
@@ -937,7 +937,7 @@
  // Loop over all of the PHI nodes in the current block, reading their inputs.
  std::vector<GenericValue> ResultValues;
-  for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
+  for (; auto *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
    // Search for the value corresponding to this previous bb...
    int i = PN->getBasicBlockIndex(PrevBB);
    assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
@@ -950,7 +950,7 @@
  // Now loop over all of the PHI nodes setting their values...
  SF.CurInst = SF.CurBB->begin();
  for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
-    PHINode *PN = cast<PHINode>(SF.CurInst);
+    auto *PN = cast<PHINode>(SF.CurInst);
    SetValue(PN, ResultValues[i], SF);
  }
 }
@@ -1002,7 +1002,7 @@
    if (StructType *STy = I.getStructTypeOrNull()) {
      const StructLayout *SLO = getDataLayout().getStructLayout(STy);
-      const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
+      const auto *CPU = cast<ConstantInt>(I.getOperand());
      unsigned Index = unsigned(CPU->getZExtValue());
      Total += SLO->getElementOffset(Index);
@@ -1227,7 +1227,7 @@
    for (unsigned i = 0; i < NumElts; i++)
      Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.trunc(DBitWidth);
  } else {
-    IntegerType *DITy = cast<IntegerType>(DstTy);
+    auto *DITy = cast<IntegerType>(DstTy);
    unsigned DBitWidth = DITy->getBitWidth();
    Dest.IntVal = Src.IntVal.trunc(DBitWidth);
  }
@@ -2054,11 +2054,11 @@
 }
 GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
-  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+  if (auto *CE = dyn_cast<ConstantExpr>(V)) {
    return getConstantExprValue(CE, SF);
-  } else if (Constant *CPV = dyn_cast<Constant>(V)) {
+  } else if (auto *CPV = dyn_cast<Constant>(V)) {
    return getConstantValue(CPV);
-  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+  } else if (auto *GV = dyn_cast<GlobalValue>(V)) {
    return PTOGV(getPointerToGlobal(GV));
  } else {
    return SF.Values[V];
Index: lib/ExecutionEngine/Orc/ExecutionUtils.cpp
===================================================================
--- lib/ExecutionEngine/Orc/ExecutionUtils.cpp
+++ lib/ExecutionEngine/Orc/ExecutionUtils.cpp
@@ -44,7 +44,7 @@
 }
 CtorDtorIterator::Element CtorDtorIterator::operator*() const {
-  ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(I));
+  auto *CS = dyn_cast<ConstantStruct>(InitList->getOperand(I));
  assert(CS && "Unrecognized type in llvm.global_ctors/llvm.global_dtors");
  Constant *FuncC = CS->getOperand(1);
@@ -52,10 +52,10 @@
  // Extract function pointer, pulling off any casts.
  while (FuncC) {
-    if (Function *F = dyn_cast_or_null<Function>(FuncC)) {
+    if (auto *F = dyn_cast_or_null<Function>(FuncC)) {
      Func = F;
      break;
-    } else if (ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(FuncC)) {
+    } else if (auto *CE = dyn_cast_or_null<ConstantExpr>(FuncC)) {
      if (CE->isCast())
        FuncC = dyn_cast_or_null<Constant>(CE->getOperand(0));
      else
        break;
@@ -66,7 +66,7 @@
    }
  }
-  ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
+  auto *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
  Value *Data = CS->getOperand(2);
  return Element(Priority->getZExtValue(), Func, Data);
 }
Index: lib/IR/AsmWriter.cpp
===================================================================
--- lib/IR/AsmWriter.cpp
+++ lib/IR/AsmWriter.cpp
@@ -76,7 +76,7 @@
  if (OM.lookup(V).first)
    return;
-  if (const Constant *C = dyn_cast<Constant>(V))
+  if (const auto *C = dyn_cast<Constant>(V))
    if (C->getNumOperands() && !isa<GlobalValue>(C))
      for (const Value *Op : C->operands())
        if (!isa<BasicBlock>(Op) && !isa<GlobalValue>(Op))
@@ -211,7 +211,7 @@
    predictValueUseListOrderImpl(V, F, IDPair.first, OM, Stack);
  // Recursive descent into constants.
-  if (const Constant *C = dyn_cast<Constant>(V))
+  if (const auto *C = dyn_cast<Constant>(V))
    if (C->getNumOperands()) // Visit GlobalValues.
      for (const Value *Op : C->operands())
        if (isa<Constant>(Op)) // Visit GlobalValues.
@@ -272,18 +272,18 @@
 }
 static const Module *getModuleFromVal(const Value *V) {
-  if (const Argument *MA = dyn_cast<Argument>(V))
+  if (const auto *MA = dyn_cast<Argument>(V))
    return MA->getParent() ? MA->getParent()->getParent() : nullptr;
-  if (const BasicBlock *BB = dyn_cast<BasicBlock>(V))
+  if (const auto *BB = dyn_cast<BasicBlock>(V))
    return BB->getParent() ? BB->getParent()->getParent() : nullptr;
-  if (const Instruction *I = dyn_cast<Instruction>(V)) {
+  if (const auto *I = dyn_cast<Instruction>(V)) {
    const Function *M = I->getParent() ? I->getParent()->getParent() : nullptr;
    return M ? M->getParent() : nullptr;
  }
-  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
+  if (const auto *GV = dyn_cast<GlobalValue>(V))
    return GV->getParent();
  if (const auto *MAV = dyn_cast<MetadataAsValue>(V)) {
@@ -487,7 +487,7 @@
    return;
  case Type::FunctionTyID: {
-    FunctionType *FTy = cast<FunctionType>(Ty);
+    auto *FTy = cast<FunctionType>(Ty);
    print(FTy->getReturnType(), OS);
    OS << " (";
    for (FunctionType::param_iterator I = FTy->param_begin(),
@@ -504,7 +504,7 @@
    return;
  }
  case Type::StructTyID: {
-    StructType *STy = cast<StructType>(Ty);
+    auto *STy = cast<StructType>(Ty);
    if (STy->isLiteral())
      return printStructBody(STy, OS);
@@ -520,7 +520,7 @@
    return;
  }
  case Type::PointerTyID: {
-    PointerType *PTy = cast<PointerType>(Ty);
+    auto *PTy = cast<PointerType>(Ty);
    print(PTy->getElementType(), OS);
    if (unsigned AddressSpace = PTy->getAddressSpace())
      OS << " addrspace(" << AddressSpace << ')';
@@ -528,14 +528,14 @@
    return;
  }
  case Type::ArrayTyID: {
-    ArrayType *ATy = cast<ArrayType>(Ty);
+    auto *ATy = cast<ArrayType>(Ty);
    OS << '[' << ATy->getNumElements() << " x ";
    print(ATy->getElementType(), OS);
    OS << ']';
    return;
  }
  case Type::VectorTyID: {
-    VectorType *PTy = cast<VectorType>(Ty);
+    auto *PTy = cast<VectorType>(Ty);
    OS << "<" << PTy->getNumElements() << " x ";
    print(PTy->getElementType(), OS);
    OS << '>';
    return;
  }
@@ -737,26 +737,26 @@
 }
 static SlotTracker *createSlotTracker(const Value *V) {
-  if (const Argument *FA = dyn_cast<Argument>(V))
+  if (const auto *FA = dyn_cast<Argument>(V))
    return new SlotTracker(FA->getParent());
-  if (const Instruction *I = dyn_cast<Instruction>(V))
+  if (const auto *I = dyn_cast<Instruction>(V))
    if (I->getParent())
      return new SlotTracker(I->getParent()->getParent());
-  if (const BasicBlock *BB = dyn_cast<BasicBlock>(V))
+  if (const auto *BB = dyn_cast<BasicBlock>(V))
    return new SlotTracker(BB->getParent());
-  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+  if (const auto *GV = dyn_cast<GlobalVariable>(V))
    return new SlotTracker(GV->getParent());
-  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
+  if (const auto *GA = dyn_cast<GlobalAlias>(V))
    return new SlotTracker(GA->getParent());
-  if (const GlobalIFunc *GIF = dyn_cast<GlobalIFunc>(V))
+  if (const auto *GIF = dyn_cast<GlobalIFunc>(V))
    return new SlotTracker(GIF->getParent());
-  if (const Function *Func = dyn_cast<Function>(V))
+  if (const auto *Func = dyn_cast<Function>(V))
    return new SlotTracker(Func);
  return nullptr;
@@ -867,12 +867,12 @@
      // We allow direct calls to any llvm.foo function here, because the
      // target may not be linked into the optimizer.
-      if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
+      if (const auto *CI = dyn_cast<CallInst>(&I)) {
        // Add all the call attributes to the table.
        AttributeSet Attrs = CI->getAttributes().getFnAttributes();
        if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
          CreateAttributeSetSlot(Attrs);
-      } else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) {
+      } else if (const auto *II = dyn_cast<InvokeInst>(&I)) {
        // Add all the call attributes to the table.
        AttributeSet Attrs = II->getAttributes().getFnAttributes();
        if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
@@ -903,12 +903,12 @@
 void SlotTracker::processInstructionMetadata(const Instruction &I) {
  // Process metadata used directly by intrinsics.
-  if (const CallInst *CI = dyn_cast<CallInst>(&I))
+  if (const auto *CI = dyn_cast<CallInst>(&I))
    if (Function *F = CI->getCalledFunction())
      if (F->isIntrinsic())
        for (auto &Op : I.operands())
          if (auto *V = dyn_cast_or_null<MetadataAsValue>(Op))
-            if (MDNode *N = dyn_cast<MDNode>(V->getMetadata()))
+            if (auto *N = dyn_cast<MDNode>(V->getMetadata()))
              CreateMetadataSlot(N);
  // Process metadata attached to this instruction.
@@ -1011,7 +1011,7 @@
  // Recursively add any MDNodes referenced by operands.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
-    if (const MDNode *Op = dyn_cast_or_null<MDNode>(N->getOperand(i)))
+    if (const auto *Op = dyn_cast_or_null<MDNode>(N->getOperand(i)))
      CreateMetadataSlot(Op);
 }
@@ -1060,7 +1060,7 @@
 }
 static void WriteOptimizationInfo(raw_ostream &Out, const User *U) {
-  if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(U)) {
+  if (const auto *FPO = dyn_cast<FPMathOperator>(U)) {
    // Unsafe algebra implies all the others, no need to write them all out
    if (FPO->hasUnsafeAlgebra())
      Out << " fast";
@@ -1076,17 +1076,17 @@
    }
  }
-  if (const OverflowingBinaryOperator *OBO =
+  if (const auto *OBO =
        dyn_cast<OverflowingBinaryOperator>(U)) {
    if (OBO->hasNoUnsignedWrap())
      Out << " nuw";
    if (OBO->hasNoSignedWrap())
      Out << " nsw";
-  } else if (const PossiblyExactOperator *Div =
+  } else if (const auto *Div =
              dyn_cast<PossiblyExactOperator>(U)) {
    if (Div->isExact())
      Out << " exact";
-  } else if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
+  } else if (const auto *GEP = dyn_cast<GEPOperator>(U)) {
    if (GEP->isInBounds())
      Out << " inbounds";
  }
@@ -1096,7 +1096,7 @@
                                  TypePrinting &TypePrinter,
                                  SlotTracker *Machine,
                                  const Module *Context) {
-  if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV)) {
+  if (const auto *CI = dyn_cast<ConstantInt>(CV)) {
    if (CI->getType()->isIntegerTy(1)) {
      Out << (CI->getZExtValue() ? "true" : "false");
      return;
    }
@@ -1105,7 +1105,7 @@
    return;
  }
-  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CV)) {
+  if (const auto *CFP = dyn_cast<ConstantFP>(CV)) {
    if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEsingle ||
        &CFP->getValueAPF().getSemantics() == &APFloat::IEEEdouble) {
      // We would like to output the FP constant value in exponential notation,
@@ -1190,7 +1190,7 @@
    return;
  }
-  if (const BlockAddress *BA = dyn_cast<BlockAddress>(CV)) {
+  if (const auto *BA = dyn_cast<BlockAddress>(CV)) {
    Out << "blockaddress(";
    WriteAsOperandInternal(Out, BA->getFunction(), &TypePrinter, Machine,
                           Context);
@@ -1201,7 +1201,7 @@
    return;
  }
-  if (const ConstantArray *CA = dyn_cast<ConstantArray>(CV)) {
+  if (const auto *CA = dyn_cast<ConstantArray>(CV)) {
    Type *ETy = CA->getType()->getElementType();
    Out << '[';
    TypePrinter.print(ETy, Out);
@@ -1220,7 +1220,7 @@
    return;
  }
-  if (const ConstantDataArray *CA = dyn_cast<ConstantDataArray>(CV)) {
+  if (const auto *CA = dyn_cast<ConstantDataArray>(CV)) {
    // As a special case, print the array as a string if it is an array of
    // i8 with ConstantInt values.
    if (CA->isString()) {
@@ -1249,7 +1249,7 @@
  }
-  if (const ConstantStruct *CS = dyn_cast<ConstantStruct>(CV)) {
+  if (const auto *CS = dyn_cast<ConstantStruct>(CV)) {
    if (CS->getType()->isPacked())
      Out << '<';
    Out << '{';
@@ -1312,7 +1312,7 @@
    return;
  }
-  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV)) {
+  if (const auto *CE = dyn_cast<ConstantExpr>(CV)) {
    Out << CE->getOpcodeName();
    WriteOptimizationInfo(Out, CE);
    if (CE->isCompare())
@@ -1321,7 +1321,7 @@
    Out << " (";
    Optional<unsigned> InRangeOp;
-    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(CE)) {
+    if (const auto *GEP = dyn_cast<GEPOperator>(CE)) {
      TypePrinter.print(GEP->getSourceElementType(), Out);
      Out << ", ";
      InRangeOp = GEP->getInRangeIndex();
@@ -1930,14 +1930,14 @@
    return;
  }
-  const Constant *CV = dyn_cast<Constant>(V);
+  const auto *CV = dyn_cast<Constant>(V);
  if (CV && !isa<GlobalValue>(CV)) {
    assert(TypePrinter && "Constants require TypePrinting!");
    WriteConstantInternal(Out, CV, *TypePrinter, Machine, Context);
    return;
  }
-  if (const InlineAsm *IA = dyn_cast<InlineAsm>(V)) {
+  if (const auto *IA = dyn_cast<InlineAsm>(V)) {
    Out << "asm ";
    if (IA->hasSideEffects())
      Out << "sideeffect ";
@@ -1964,7 +1964,7 @@
  int Slot;
  // If we have a SlotTracker, use it.
  if (Machine) {
-    if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+    if (const auto *GV = dyn_cast<GlobalValue>(V)) {
      Slot = Machine->getGlobalSlot(GV);
      Prefix = '@';
    } else {
@@ -1981,7 +1981,7 @@
    }
  } else if ((Machine = createSlotTracker(V))) {
    // Otherwise, create one to get the # and then destroy it.
-    if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+    if (const auto *GV = dyn_cast<GlobalValue>(V)) {
      Slot = Machine->getGlobalSlot(GV);
      Prefix = '@';
    } else {
@@ -2003,7 +2003,7 @@
                                   TypePrinting *TypePrinter,
                                   SlotTracker *Machine, const Module *Context,
                                   bool FromValue) {
-  if (const MDNode *N = dyn_cast<MDNode>(MD)) {
+  if (const auto *N = dyn_cast<MDNode>(MD)) {
    std::unique_ptr<SlotTracker> MachineStorage;
    if (!Machine) {
      MachineStorage = make_unique<SlotTracker>(Context);
@@ -2019,7 +2019,7 @@
    return;
  }
-  if (const MDString *MDS = dyn_cast<MDString>(MD)) {
+  if (const auto *MDS = dyn_cast<MDString>(MD)) {
    Out << "!\"";
    PrintEscapedString(MDS->getString(), Out);
    Out << '"';
@@ -2821,7 +2821,7 @@
      Out << '%' << SlotNum << " = ";
  }
-  if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
+  if (const auto *CI = dyn_cast<CallInst>(&I)) {
    if (CI->isMustTailCall())
      Out << "musttail ";
    else if (CI->isTailCall())
@@ -2852,11 +2852,11 @@
  WriteOptimizationInfo(Out, &I);
  // Print out the compare instruction predicates
-  if (const CmpInst *CI = dyn_cast<CmpInst>(&I))
+  if (const auto *CI = dyn_cast<CmpInst>(&I))
    Out << ' ' << CmpInst::getPredicateName(CI->getPredicate());
  // Print out the atomicrmw operation
-  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I))
+  if (const auto *RMWI = dyn_cast<AtomicRMWInst>(&I))
    writeAtomicRMWOperation(Out, RMWI->getOperation());
  // Print out the type of the operands...
@@ -2864,7 +2864,7 @@
  // Special case conditional branches to swizzle the condition out to the front
  if (isa<BranchInst>(I) && cast<BranchInst>(I).isConditional()) {
-    const BranchInst &BI(cast<BranchInst>(I));
+    const auto &BI(cast<BranchInst>(I));
    Out << ' ';
    writeOperand(BI.getCondition(), true);
    Out << ", ";
@@ -2873,7 +2873,7 @@
    writeOperand(BI.getSuccessor(1), true);
  } else if (isa<SwitchInst>(I)) {
-    const SwitchInst& SI(cast<SwitchInst>(I));
+    const auto & SI(cast<SwitchInst>(I));
    // Special case switch instruction to get formatting nice and correct.
    Out << ' ';
    writeOperand(SI.getCondition(), true);
@@ -2900,7 +2900,7 @@
      writeOperand(I.getOperand(i), true);
    }
    Out << ']';
-  } else if (const PHINode *PN = dyn_cast<PHINode>(&I)) {
+  } else if (const auto *PN = dyn_cast<PHINode>(&I)) {
    Out << ' ';
    TypePrinter.print(I.getType(), Out);
    Out << ' ';
@@ -2911,18 +2911,18 @@
      writeOperand(PN->getIncomingValue(op), false); Out << ", ";
      writeOperand(PN->getIncomingBlock(op), false); Out << " ]";
    }
-  } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&I)) {
+  } else if (const auto *EVI = dyn_cast<ExtractValueInst>(&I)) {
    Out << ' ';
    writeOperand(I.getOperand(0), true);
    for (const unsigned *i = EVI->idx_begin(), *e = EVI->idx_end(); i != e; ++i)
      Out << ", " << *i;
-  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&I)) {
+  } else if (const auto *IVI = dyn_cast<InsertValueInst>(&I)) {
    Out << ' ';
    writeOperand(I.getOperand(0), true); Out << ", ";
    writeOperand(I.getOperand(1), true);
    for (const unsigned *i = IVI->idx_begin(), *e = IVI->idx_end(); i != e; ++i)
      Out << ", " << *i;
-  } else if (const LandingPadInst *LPI = dyn_cast<LandingPadInst>(&I)) {
+  } else if (const auto *LPI = dyn_cast<LandingPadInst>(&I)) {
    Out << ' ';
    TypePrinter.print(I.getType(), Out);
    if (LPI->isCleanup() || LPI->getNumClauses() != 0)
@@ -2984,7 +2984,7 @@
      writeOperand(CRI->getOperand(1), /*PrintType=*/true);
    else
      Out << "to caller";
-  } else if (const CallInst *CI = dyn_cast<CallInst>(&I)) {
+  } else if (const auto *CI = dyn_cast<CallInst>(&I)) {
    // Print the calling convention being used.
    if (CI->getCallingConv() != CallingConv::C) {
      Out << " ";
@@ -2992,7 +2992,7 @@
    }
    Operand = CI->getCalledValue();
-    FunctionType *FTy = cast<FunctionType>(CI->getFunctionType());
+    auto *FTy = cast<FunctionType>(CI->getFunctionType());
    Type *RetTy = FTy->getReturnType();
    const AttributeSet &PAL = CI->getAttributes();
@@ -3027,9 +3027,9 @@
    writeOperandBundles(CI);
-  } else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) {
+  } else if (const auto *II = dyn_cast<InvokeInst>(&I)) {
    Operand = II->getCalledValue();
-    FunctionType *FTy = cast<FunctionType>(II->getFunctionType());
+    auto *FTy = cast<FunctionType>(II->getFunctionType());
    Type *RetTy = FTy->getReturnType();
    const AttributeSet &PAL = II->getAttributes();
@@ -3068,7 +3068,7 @@
    Out << " unwind ";
    writeOperand(II->getUnwindDest(), true);
-  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
+  } else if (const auto *AI = dyn_cast<AllocaInst>(&I)) {
    Out << ' ';
    if (AI->isUsedWithInAlloca())
      Out << "inalloca ";
@@ -3148,22 +3148,22 @@
  }
  // Print atomic ordering/alignment for memory operations
-  if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
+  if (const auto *LI = dyn_cast<LoadInst>(&I)) {
    if (LI->isAtomic())
      writeAtomic(LI->getOrdering(), LI->getSynchScope());
    if (LI->getAlignment())
      Out << ", align " << LI->getAlignment();
-  } else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
+  } else if (const auto *SI = dyn_cast<StoreInst>(&I)) {
    if (SI->isAtomic())
      writeAtomic(SI->getOrdering(), SI->getSynchScope());
    if (SI->getAlignment())
      Out << ", align " << SI->getAlignment();
-  } else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
+  } else if (const auto *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    writeAtomicCmpXchg(CXI->getSuccessOrdering(), CXI->getFailureOrdering(),
                       CXI->getSynchScope());
-  } else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
+  } else if (const auto *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
    writeAtomic(RMWI->getOrdering(), RMWI->getSynchScope());
-  } else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) {
+  } else if (const auto *FI = dyn_cast<FenceInst>(&I)) {
    writeAtomic(FI->getOrdering(), FI->getSynchScope());
  }
@@ -3352,7 +3352,7 @@
    return;
  // If the type is a named struct type, print the body as well.
-  if (StructType *STy = dyn_cast<StructType>(const_cast<Type*>(this)))
+  if (auto *STy = dyn_cast<StructType>(const_cast<Type*>(this)))
    if (!STy->isLiteral()) {
      OS << " = type ";
      TP.printStructBody(STy, OS);
    }
@@ -3392,25 +3392,25 @@
      MST.incorporateFunction(*F);
  };
-  if (const Instruction *I = dyn_cast<Instruction>(this)) {
+  if (const auto *I = dyn_cast<Instruction>(this)) {
    incorporateFunction(I->getParent() ? I->getParent()->getParent() : nullptr);
    AssemblyWriter W(OS, SlotTable, getModuleFromVal(I), nullptr, IsForDebug);
    W.printInstruction(*I);
-  } else if (const BasicBlock *BB = dyn_cast<BasicBlock>(this)) {
+  } else if (const auto *BB = dyn_cast<BasicBlock>(this)) {
    incorporateFunction(BB->getParent());
    AssemblyWriter W(OS, SlotTable, getModuleFromVal(BB), nullptr, IsForDebug);
    W.printBasicBlock(BB);
-  } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(this)) {
+  } else if (const auto *GV = dyn_cast<GlobalValue>(this)) {
    AssemblyWriter W(OS, SlotTable, GV->getParent(), nullptr, IsForDebug);
-    if (const GlobalVariable *V = dyn_cast<GlobalVariable>(GV))
+    if (const auto *V = dyn_cast<GlobalVariable>(GV))
      W.printGlobal(V);
-    else if (const Function *F = dyn_cast<Function>(GV))
+    else if (const auto *F = dyn_cast<Function>(GV))
      W.printFunction(F);
    else
      W.printIndirectSymbol(cast<GlobalIndirectSymbol>(GV));
-  } else if (const MetadataAsValue *V = dyn_cast<MetadataAsValue>(this)) {
+  } else if (const auto *V = dyn_cast<MetadataAsValue>(this)) {
    V->getMetadata()->print(ROS, MST, getModuleFromVal(V));
-  } else if (const Constant *C = dyn_cast<Constant>(this)) {
+  } else if (const auto *C = dyn_cast<Constant>(this)) {
    TypePrinting TypePrinter;
    TypePrinter.print(C->getType(), OS);
    OS << ' ';
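// ---------------------------------------------------------------------------
// Two hunks above bind a cast to a reference, e.g.
// 'const auto &BI(cast<BranchInst>(I))': LLVM's cast<> applied to a
// reference yields a reference, so 'const auto &' tracks that exactly.
// Standard-C++ sketch; dynamic_cast on a reference is the closest stand-in
// (it throws std::bad_cast where LLVM's cast<> would assert instead):

#include <iostream>

struct Inst { virtual ~Inst() = default; };
struct Branch : Inst { bool conditional() const { return true; } };

void print(const Inst &I) {
  const auto &BI = dynamic_cast<const Branch &>(I);
  std::cout << (BI.conditional() ? "br (cond)" : "br") << '\n';
}

int main() {
  Branch B;
  print(B); // prints: br (cond)
}
// ---------------------------------------------------------------------------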
@@ -1197,7 +1197,7 @@ Name.startswith("avx512.mask.perm.di."))) { Value *Op0 = CI->getArgOperand(0); unsigned Imm = cast(CI->getArgOperand(1))->getZExtValue(); - VectorType *VecTy = cast(CI->getType()); + auto *VecTy = cast(CI->getType()); unsigned NumElts = VecTy->getNumElements(); SmallVector Idxs(NumElts); @@ -1215,7 +1215,7 @@ Name.startswith("avx512.mask.pshuf.d."))) { Value *Op0 = CI->getArgOperand(0); unsigned Imm = cast(CI->getArgOperand(1))->getZExtValue(); - VectorType *VecTy = cast(CI->getType()); + auto *VecTy = cast(CI->getType()); unsigned NumElts = VecTy->getNumElements(); // Calculate the size of each index in the immediate. unsigned IdxSize = 64 / VecTy->getScalarSizeInBits(); @@ -1368,7 +1368,7 @@ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, CI->getArgOperand(2)); } else if (IsX86 && Name.startswith("avx512.mask.and.")) { - VectorType *FTy = cast(CI->getType()); + auto *FTy = cast(CI->getType()); VectorType *ITy = VectorType::getInteger(FTy); Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy), Builder.CreateBitCast(CI->getArgOperand(1), ITy)); @@ -1376,7 +1376,7 @@ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, CI->getArgOperand(2)); } else if (IsX86 && Name.startswith("avx512.mask.andn.")) { - VectorType *FTy = cast(CI->getType()); + auto *FTy = cast(CI->getType()); VectorType *ITy = VectorType::getInteger(FTy); Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy)); Rep = Builder.CreateAnd(Rep, @@ -1385,7 +1385,7 @@ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, CI->getArgOperand(2)); } else if (IsX86 && Name.startswith("avx512.mask.or.")) { - VectorType *FTy = cast(CI->getType()); + auto *FTy = cast(CI->getType()); VectorType *ITy = VectorType::getInteger(FTy); Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy), Builder.CreateBitCast(CI->getArgOperand(1), ITy)); @@ -1393,7 +1393,7 @@ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, CI->getArgOperand(2)); } else if (IsX86 && Name.startswith("avx512.mask.xor.")) { - VectorType *FTy = cast(CI->getType()); + auto *FTy = cast(CI->getType()); VectorType *ITy = VectorType::getInteger(FTy); Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy), Builder.CreateBitCast(CI->getArgOperand(1), ITy)); @@ -1429,7 +1429,7 @@ Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, CI->getArgOperand(2)); } else if (IsX86 && Name.startswith("avx512.mask.pshuf.b.")) { - VectorType *VecTy = cast(CI->getType()); + auto *VecTy = cast(CI->getType()); Intrinsic::ID IID; if (VecTy->getPrimitiveSizeInBits() == 128) IID = Intrinsic::x86_ssse3_pshuf_b_128; @@ -1719,7 +1719,7 @@ case Intrinsic::x86_xop_vpermil2ps_256: { SmallVector Args(CI->arg_operands().begin(), CI->arg_operands().end()); - VectorType *FltIdxTy = cast(Args[2]->getType()); + auto *FltIdxTy = cast(Args[2]->getType()); VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy); Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy); CI->replaceAllUsesWith(Builder.CreateCall(NewFn, Args, Name)); @@ -1801,7 +1801,7 @@ // Replace all users of the old function with the new function or new // instructions. This is not a range loop because the call is deleted. for (auto UI = F->user_begin(), UE = F->user_end(); UI != UE; ) - if (CallInst *CI = dyn_cast(*UI++)) + if (auto *CI = dyn_cast(*UI++)) UpgradeIntrinsicCall(CI, NewFn); // Remove old function, no longer used, from the module. 
@@ -1898,7 +1898,7 @@
     MDNode *Op = ModFlags->getOperand(I);
     if (Op->getNumOperands() < 2)
       continue;
-    MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
+    auto *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
     if (!ID)
       continue;
     if (ID->getString() == "Objective-C Image Info Version")
Index: lib/IR/BasicBlock.cpp
===================================================================
--- lib/IR/BasicBlock.cpp
+++ lib/IR/BasicBlock.cpp
@@ -73,7 +73,7 @@
   Constant *Replacement =
       ConstantInt::get(llvm::Type::getInt32Ty(getContext()), 1);
   while (!use_empty()) {
-    BlockAddress *BA = cast<BlockAddress>(user_back());
+    auto *BA = cast<BlockAddress>(user_back());
     BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                      BA->getType()));
     BA->destroyConstant();
@@ -134,7 +134,7 @@
 CallInst *BasicBlock::getTerminatingMustTailCall() {
   if (InstList.empty())
     return nullptr;
-  ReturnInst *RI = dyn_cast<ReturnInst>(&InstList.back());
+  auto *RI = dyn_cast<ReturnInst>(&InstList.back());
   if (!RI || RI == &InstList.front())
     return nullptr;
@@ -285,7 +285,7 @@
          "removePredecessor: BB is not a predecessor!");
   if (InstList.empty()) return;
-  PHINode *APN = dyn_cast<PHINode>(&front());
+  auto *APN = dyn_cast<PHINode>(&front());
   if (!APN) return;   // Quick exit.
   // If there are exactly two predecessors, then we want to nuke the PHI nodes
@@ -312,7 +312,7 @@
   // <= Two predecessors BEFORE I remove one?
   if (max_idx <= 2 && !DontDeleteUselessPHIs) {
     // Yup, loop through and nuke the PHI nodes
-    while (PHINode *PN = dyn_cast<PHINode>(&front())) {
+    while (auto *PN = dyn_cast<PHINode>(&front())) {
       // Remove the predecessor first.
       PN->removeIncomingValue(Pred, !DontDeleteUselessPHIs);
@@ -421,7 +421,7 @@
   // N.B. Succ might not be a complete BasicBlock, so don't assume
   // that it ends with a non-phi instruction.
   for (iterator II = Succ->begin(), IE = Succ->end(); II != IE; ++II) {
-    PHINode *PN = dyn_cast<PHINode>(II);
+    auto *PN = dyn_cast<PHINode>(II);
     if (!PN) break;
     int i;
Index: lib/IR/ConstantFold.cpp
===================================================================
--- lib/IR/ConstantFold.cpp
+++ lib/IR/ConstantFold.cpp
@@ -104,8 +104,8 @@
   // Check to see if we are casting a pointer to an aggregate to a pointer to
   // the first element. If so, return the appropriate GEP instruction.
-  if (PointerType *PTy = dyn_cast<PointerType>(V->getType()))
-    if (PointerType *DPTy = dyn_cast<PointerType>(DestTy))
+  if (auto *PTy = dyn_cast<PointerType>(V->getType()))
+    if (auto *DPTy = dyn_cast<PointerType>(DestTy))
       if (PTy->getAddressSpace() == DPTy->getAddressSpace() &&
           PTy->getElementType()->isSized()) {
         SmallVector IdxList;
@@ -114,11 +114,11 @@
         IdxList.push_back(Zero);
         Type *ElTy = PTy->getElementType();
         while (ElTy != DPTy->getElementType()) {
-          if (StructType *STy = dyn_cast<StructType>(ElTy)) {
+          if (auto *STy = dyn_cast<StructType>(ElTy)) {
             if (STy->getNumElements() == 0) break;
             ElTy = STy->getElementType(0);
             IdxList.push_back(Zero);
-          } else if (SequentialType *STy =
+          } else if (auto *STy =
                      dyn_cast<SequentialType>(ElTy)) {
             ElTy = STy->getElementType();
             IdxList.push_back(Zero);
@@ -135,8 +135,8 @@
   // Handle casts from one vector constant to another. We know that the src
   // and dest type have the same size (otherwise its an illegal cast).
-  if (VectorType *DestPTy = dyn_cast<VectorType>(DestTy)) {
-    if (VectorType *SrcTy = dyn_cast<VectorType>(V->getType())) {
+  if (auto *DestPTy = dyn_cast<VectorType>(DestTy)) {
+    if (auto *SrcTy = dyn_cast<VectorType>(V->getType())) {
       assert(DestPTy->getBitWidth() == SrcTy->getBitWidth() &&
              "Not cast between same sized vectors!");
       SrcTy = nullptr;
@@ -161,7 +161,7 @@
     return ConstantPointerNull::get(cast<PointerType>(DestTy));
   // Handle integral constant input.
- if (ConstantInt *CI = dyn_cast(V)) { + if (auto *CI = dyn_cast(V)) { if (DestTy->isIntegerTy()) // Integral -> Integral. This is a no-op because the bit widths must // be the same. Consequently, we just fold to V. @@ -178,7 +178,7 @@ } // Handle ConstantFP input: FP -> Integral. - if (ConstantFP *FP = dyn_cast(V)) { + if (auto *FP = dyn_cast(V)) { // PPC_FP128 is really the sum of two consecutive doubles, where the first // double is always stored first in memory, regardless of the target // endianness. The memory layout of i128, however, depends on the target @@ -219,7 +219,7 @@ assert(ByteSize != CSize && "Should not extract everything"); // Constant Integers are simple. - if (ConstantInt *CI = dyn_cast(C)) { + if (auto *CI = dyn_cast(C)) { APInt V = CI->getValue(); if (ByteStart) V = V.lshr(ByteStart*8); @@ -229,7 +229,7 @@ // In the input is a constant expr, we might be able to recursively simplify. // If not, we definitely can't do anything. - ConstantExpr *CE = dyn_cast(C); + auto *CE = dyn_cast(C); if (!CE) return nullptr; switch (CE->getOpcode()) { @@ -240,7 +240,7 @@ return nullptr; // X | -1 -> -1. - if (ConstantInt *RHSC = dyn_cast(RHS)) + if (auto *RHSC = dyn_cast(RHS)) if (RHSC->isAllOnesValue()) return RHSC; @@ -264,7 +264,7 @@ return ConstantExpr::getAnd(LHS, RHS); } case Instruction::LShr: { - ConstantInt *Amt = dyn_cast(CE->getOperand(1)); + auto *Amt = dyn_cast(CE->getOperand(1)); if (!Amt) return nullptr; unsigned ShAmt = Amt->getZExtValue(); @@ -286,7 +286,7 @@ } case Instruction::Shl: { - ConstantInt *Amt = dyn_cast(CE->getOperand(1)); + auto *Amt = dyn_cast(CE->getOperand(1)); if (!Amt) return nullptr; unsigned ShAmt = Amt->getZExtValue(); @@ -349,13 +349,13 @@ /// top-level folder. static Constant *getFoldedSizeOf(Type *Ty, Type *DestTy, bool Folded) { - if (ArrayType *ATy = dyn_cast(Ty)) { + if (auto *ATy = dyn_cast(Ty)) { Constant *N = ConstantInt::get(DestTy, ATy->getNumElements()); Constant *E = getFoldedSizeOf(ATy->getElementType(), DestTy, true); return ConstantExpr::getNUWMul(E, N); } - if (StructType *STy = dyn_cast(Ty)) + if (auto *STy = dyn_cast(Ty)) if (!STy->isPacked()) { unsigned NumElems = STy->getNumElements(); // An empty struct has size zero. @@ -379,7 +379,7 @@ // Pointer size doesn't depend on the pointee type, so canonicalize them // to an arbitrary pointee. - if (PointerType *PTy = dyn_cast(Ty)) + if (auto *PTy = dyn_cast(Ty)) if (!PTy->getElementType()->isIntegerTy(1)) return getFoldedSizeOf(PointerType::get(IntegerType::get(PTy->getContext(), 1), @@ -407,7 +407,7 @@ bool Folded) { // The alignment of an array is equal to the alignment of the // array element. Note that this is not always true for vectors. - if (ArrayType *ATy = dyn_cast(Ty)) { + if (auto *ATy = dyn_cast(Ty)) { Constant *C = ConstantExpr::getAlignOf(ATy->getElementType()); C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, DestTy, @@ -416,7 +416,7 @@ return C; } - if (StructType *STy = dyn_cast(Ty)) { + if (auto *STy = dyn_cast(Ty)) { // Packed structs always have an alignment of 1. if (STy->isPacked()) return ConstantInt::get(DestTy, 1); @@ -443,7 +443,7 @@ // Pointer alignment doesn't depend on the pointee type, so canonicalize them // to an arbitrary pointee. 
- if (PointerType *PTy = dyn_cast(Ty)) + if (auto *PTy = dyn_cast(Ty)) if (!PTy->getElementType()->isIntegerTy(1)) return getFoldedAlignOf(PointerType::get(IntegerType::get(PTy->getContext(), @@ -471,7 +471,7 @@ static Constant *getFoldedOffsetOf(Type *Ty, Constant *FieldNo, Type *DestTy, bool Folded) { - if (ArrayType *ATy = dyn_cast(Ty)) { + if (auto *ATy = dyn_cast(Ty)) { Constant *N = ConstantExpr::getCast(CastInst::getCastOpcode(FieldNo, false, DestTy, false), FieldNo, DestTy); @@ -479,7 +479,7 @@ return ConstantExpr::getNUWMul(E, N); } - if (StructType *STy = dyn_cast(Ty)) + if (auto *STy = dyn_cast(Ty)) if (!STy->isPacked()) { unsigned NumElems = STy->getNumElements(); // An empty struct has no members. @@ -536,7 +536,7 @@ // If the cast operand is a constant expression, there's a few things we can // do to try to simplify it. - if (ConstantExpr *CE = dyn_cast(V)) { + if (auto *CE = dyn_cast(V)) { if (CE->isCast()) { // Try hard to fold cast of cast because they are often eliminable. if (unsigned newOpc = foldConstantCastPair(opc, CE, DestTy)) @@ -569,7 +569,7 @@ DestTy->isVectorTy() && DestTy->getVectorNumElements() == V->getType()->getVectorNumElements()) { SmallVector res; - VectorType *DestVecTy = cast(DestTy); + auto *DestVecTy = cast(DestTy); Type *DstEltTy = DestVecTy->getElementType(); Type *Ty = IntegerType::get(V->getContext(), 32); for (unsigned i = 0, e = V->getType()->getVectorNumElements(); i != e; ++i) { @@ -587,7 +587,7 @@ llvm_unreachable("Failed to cast constant expression"); case Instruction::FPTrunc: case Instruction::FPExt: - if (ConstantFP *FPC = dyn_cast(V)) { + if (auto *FPC = dyn_cast(V)) { bool ignored; APFloat Val = FPC->getValueAPF(); Val.convert(DestTy->isHalfTy() ? APFloat::IEEEhalf : @@ -603,7 +603,7 @@ return nullptr; // Can't fold. case Instruction::FPToUI: case Instruction::FPToSI: - if (ConstantFP *FPC = dyn_cast(V)) { + if (auto *FPC = dyn_cast(V)) { const APFloat &V = FPC->getValueAPF(); bool ignored; uint64_t x[2]; @@ -630,10 +630,10 @@ // If this is a sizeof-like expression, pull out multiplications by // known factors to expose them to subsequent folding. If it's an // alignof-like expression, factor out known factors. - if (ConstantExpr *CE = dyn_cast(V)) + if (auto *CE = dyn_cast(V)) if (CE->getOpcode() == Instruction::GetElementPtr && CE->getOperand(0)->isNullValue()) { - GEPOperator *GEPO = cast(CE); + auto *GEPO = cast(CE); Type *Ty = GEPO->getSourceElementType(); if (CE->getNumOperands() == 2) { // Handle a sizeof-like expression. @@ -648,9 +648,9 @@ } else if (CE->getNumOperands() == 3 && CE->getOperand(1)->isNullValue()) { // Handle an alignof-like expression. 
- if (StructType *STy = dyn_cast(Ty)) + if (auto *STy = dyn_cast(Ty)) if (!STy->isPacked()) { - ConstantInt *CI = cast(CE->getOperand(2)); + auto *CI = cast(CE->getOperand(2)); if (CI->isOne() && STy->getNumElements() == 2 && STy->getElementType(0)->isIntegerTy(1)) { @@ -669,7 +669,7 @@ return nullptr; case Instruction::UIToFP: case Instruction::SIToFP: - if (ConstantInt *CI = dyn_cast(V)) { + if (auto *CI = dyn_cast(V)) { const APInt &api = CI->getValue(); APFloat apf(DestTy->getFltSemantics(), APInt::getNullValue(DestTy->getPrimitiveSizeInBits())); @@ -684,14 +684,14 @@ } return nullptr; case Instruction::ZExt: - if (ConstantInt *CI = dyn_cast(V)) { + if (auto *CI = dyn_cast(V)) { uint32_t BitWidth = cast(DestTy)->getBitWidth(); return ConstantInt::get(V->getContext(), CI->getValue().zext(BitWidth)); } return nullptr; case Instruction::SExt: - if (ConstantInt *CI = dyn_cast(V)) { + if (auto *CI = dyn_cast(V)) { uint32_t BitWidth = cast(DestTy)->getBitWidth(); return ConstantInt::get(V->getContext(), CI->getValue().sext(BitWidth)); @@ -702,7 +702,7 @@ return nullptr; uint32_t DestBitWidth = cast(DestTy)->getBitWidth(); - if (ConstantInt *CI = dyn_cast(V)) { + if (auto *CI = dyn_cast(V)) { return ConstantInt::get(V->getContext(), CI->getValue().trunc(DestBitWidth)); } @@ -731,7 +731,7 @@ if (Cond->isAllOnesValue()) return V1; // If the condition is a vector constant, fold the result elementwise. - if (ConstantVector *CondV = dyn_cast(Cond)) { + if (auto *CondV = dyn_cast(Cond)) { SmallVector Result; Type *Ty = IntegerType::get(CondV->getContext(), 32); for (unsigned i = 0, e = V1->getType()->getVectorNumElements(); i != e;++i){ @@ -740,7 +740,7 @@ ConstantInt::get(Ty, i)); Constant *V2Element = ConstantExpr::getExtractElement(V2, ConstantInt::get(Ty, i)); - Constant *Cond = dyn_cast(CondV->getOperand(i)); + auto *Cond = dyn_cast(CondV->getOperand(i)); if (V1Element == V2Element) { V = V1Element; } else if (isa(Cond)) { @@ -765,12 +765,12 @@ if (isa(V2)) return V1; if (V1 == V2) return V1; - if (ConstantExpr *TrueVal = dyn_cast(V1)) { + if (auto *TrueVal = dyn_cast(V1)) { if (TrueVal->getOpcode() == Instruction::Select) if (TrueVal->getOperand(0) == Cond) return ConstantExpr::getSelect(Cond, TrueVal->getOperand(1), V2); } - if (ConstantExpr *FalseVal = dyn_cast(V2)) { + if (auto *FalseVal = dyn_cast(V2)) { if (FalseVal->getOpcode() == Instruction::Select) if (FalseVal->getOperand(0) == Cond) return ConstantExpr::getSelect(Cond, V1, FalseVal->getOperand(2)); @@ -789,7 +789,7 @@ if (isa(Idx)) return UndefValue::get(Val->getType()->getVectorElementType()); - if (ConstantInt *CIdx = dyn_cast(Idx)) { + if (auto *CIdx = dyn_cast(Idx)) { // ee({w,x,y,z}, wrong_value) -> undef if (CIdx->uge(Val->getType()->getVectorNumElements())) return UndefValue::get(Val->getType()->getVectorElementType()); @@ -804,7 +804,7 @@ if (isa(Idx)) return UndefValue::get(Val->getType()); - ConstantInt *CIdx = dyn_cast(Idx); + auto *CIdx = dyn_cast(Idx); if (!CIdx) return nullptr; unsigned NumElts = Val->getType()->getVectorNumElements(); @@ -889,7 +889,7 @@ return Val; unsigned NumElts; - if (StructType *ST = dyn_cast(Agg->getType())) + if (auto *ST = dyn_cast(Agg->getType())) NumElts = ST->getNumElements(); else NumElts = cast(Agg->getType())->getNumElements(); @@ -905,9 +905,9 @@ Result.push_back(C); } - if (StructType *ST = dyn_cast(Agg->getType())) + if (auto *ST = dyn_cast(Agg->getType())) return ConstantStruct::get(ST, Result); - if (ArrayType *AT = dyn_cast(Agg->getType())) + if (auto *AT = 
dyn_cast(Agg->getType())) return ConstantArray::get(AT, Result); return ConstantVector::get(Result); } @@ -1016,7 +1016,7 @@ "Unexpected UndefValue"); // Handle simplifications when the RHS is a constant int. - if (ConstantInt *CI2 = dyn_cast(C2)) { + if (auto *CI2 = dyn_cast(C2)) { switch (Opcode) { case Instruction::Add: if (CI2->equalsInt(0)) return C1; // X + 0 == X @@ -1048,7 +1048,7 @@ if (CI2->isAllOnesValue()) return C1; // X & -1 == X - if (ConstantExpr *CE1 = dyn_cast(C1)) { + if (auto *CE1 = dyn_cast(C1)) { // (zext i32 to i64) & 4294967295 -> (zext i32 to i64) if (CE1->getOpcode() == Instruction::ZExt) { unsigned DstWidth = CI2->getType()->getBitWidth(); @@ -1062,7 +1062,7 @@ // If and'ing the address of a global with a constant, fold it. if (CE1->getOpcode() == Instruction::PtrToInt && isa(CE1->getOperand(0))) { - GlobalValue *GV = cast(CE1->getOperand(0)); + auto *GV = cast(CE1->getOperand(0)); // Functions are at least 4-byte aligned. unsigned GVAlign = GV->getAlignment(); @@ -1089,7 +1089,7 @@ case Instruction::Xor: if (CI2->equalsInt(0)) return C1; // X ^ 0 == X - if (ConstantExpr *CE1 = dyn_cast(C1)) { + if (auto *CE1 = dyn_cast(C1)) { switch (CE1->getOpcode()) { default: break; case Instruction::ICmp: @@ -1105,7 +1105,7 @@ break; case Instruction::AShr: // ashr (zext C to Ty), C2 -> lshr (zext C, CSA), C2 - if (ConstantExpr *CE1 = dyn_cast(C1)) + if (auto *CE1 = dyn_cast(C1)) if (CE1->getOpcode() == Instruction::ZExt) // Top bits known zero. return ConstantExpr::getLShr(C1, C2); break; @@ -1116,8 +1116,8 @@ return ConstantExpr::get(Opcode, C2, C1); } - if (ConstantInt *CI1 = dyn_cast(C1)) { - if (ConstantInt *CI2 = dyn_cast(C2)) { + if (auto *CI1 = dyn_cast(C1)) { + if (auto *CI2 = dyn_cast(C2)) { const APInt &C1V = CI1->getValue(); const APInt &C2V = CI2->getValue(); switch (Opcode) { @@ -1179,8 +1179,8 @@ default: break; } - } else if (ConstantFP *CFP1 = dyn_cast(C1)) { - if (ConstantFP *CFP2 = dyn_cast(C2)) { + } else if (auto *CFP1 = dyn_cast(C1)) { + if (auto *CFP2 = dyn_cast(C2)) { const APFloat &C1V = CFP1->getValueAPF(); const APFloat &C2V = CFP2->getValueAPF(); APFloat C3V = C1V; // copy for modification @@ -1204,7 +1204,7 @@ return ConstantFP::get(C1->getContext(), C3V); } } - } else if (VectorType *VTy = dyn_cast(C1->getType())) { + } else if (auto *VTy = dyn_cast(C1->getType())) { // Perform elementwise folding. SmallVector Result; Type *Ty = IntegerType::get(VTy->getContext(), 32); @@ -1220,7 +1220,7 @@ return ConstantVector::get(Result); } - if (ConstantExpr *CE1 = dyn_cast(C1)) { + if (auto *CE1 = dyn_cast(C1)) { // There are many possible foldings we could do here. We should probably // at least fold add of a pointer with an integer into the appropriate // getelementptr. This will improve alias analysis a bit. @@ -1275,7 +1275,7 @@ /// This type is zero-sized if it's an array or structure of zero-sized types. /// The only leaf zero-sized type is an empty structure. static bool isMaybeZeroSizedType(Type *Ty) { - if (StructType *STy = dyn_cast(Ty)) { + if (auto *STy = dyn_cast(Ty)) { if (STy->isOpaque()) return true; // Can't say. // If all of elements have zero size, this does too. @@ -1283,7 +1283,7 @@ if (!isMaybeZeroSizedType(STy->getElementType(i))) return false; return true; - } else if (ArrayType *ATy = dyn_cast(Ty)) { + } else if (auto *ATy = dyn_cast(Ty)) { return isMaybeZeroSizedType(ATy->getElementType()); } return false; @@ -1375,7 +1375,7 @@ } else { // Ok, the LHS is known to be a constantexpr. 
The RHS can be any of a // constantexpr or a simple constant. - ConstantExpr *CE1 = cast(V1); + auto *CE1 = cast(V1); switch (CE1->getOpcode()) { case Instruction::FPTrunc: case Instruction::FPExt: @@ -1464,7 +1464,7 @@ if (SwappedRelation != ICmpInst::BAD_ICMP_PREDICATE) return ICmpInst::getSwappedPredicate(SwappedRelation); - } else if (const GlobalValue *GV = dyn_cast(V1)) { + } else if (const auto *GV = dyn_cast(V1)) { if (isa(V2)) { // Swap as necessary. ICmpInst::Predicate SwappedRelation = evaluateICmpRelation(V2, V1, isSigned); @@ -1476,7 +1476,7 @@ // Now we know that the RHS is a GlobalValue, BlockAddress or simple // constant (which, since the types must match, means that it's a // ConstantPointerNull). - if (const GlobalValue *GV2 = dyn_cast(V2)) { + if (const auto *GV2 = dyn_cast(V2)) { return areGlobalsPotentiallyEqual(GV, GV2); } else if (isa(V2)) { return ICmpInst::ICMP_NE; // Globals never equal labels. @@ -1487,7 +1487,7 @@ if (!GV->hasExternalWeakLinkage() && !isa(GV)) return ICmpInst::ICMP_NE; } - } else if (const BlockAddress *BA = dyn_cast(V1)) { + } else if (const auto *BA = dyn_cast(V1)) { if (isa(V2)) { // Swap as necessary. ICmpInst::Predicate SwappedRelation = evaluateICmpRelation(V2, V1, isSigned); @@ -1499,7 +1499,7 @@ // Now we know that the RHS is a GlobalValue, BlockAddress or simple // constant (which, since the types must match, means that it is a // ConstantPointerNull). - if (const BlockAddress *BA2 = dyn_cast(V2)) { + if (const auto *BA2 = dyn_cast(V2)) { // Block address in another function can't equal this one, but block // addresses in the current function might be the same if blocks are // empty. @@ -1514,7 +1514,7 @@ } else { // Ok, the LHS is known to be a constantexpr. The RHS can be any of a // constantexpr, a global, block address, or a simple constant. - ConstantExpr *CE1 = cast(V1); + auto *CE1 = cast(V1); Constant *CE1Op0 = CE1->getOperand(0); switch (CE1->getOpcode()) { @@ -1547,13 +1547,13 @@ break; case Instruction::GetElementPtr: { - GEPOperator *CE1GEP = cast(CE1); + auto *CE1GEP = cast(CE1); // Ok, since this is a getelementptr, we know that the constant has a // pointer type. Check the various cases. if (isa(V2)) { // If we are comparing a GEP to a null pointer, check to see if the base // of the GEP equals the null pointer. - if (const GlobalValue *GV = dyn_cast(CE1Op0)) { + if (const auto *GV = dyn_cast(CE1Op0)) { if (GV->hasExternalWeakLinkage()) // Weak linkage GVals could be zero or not. We're comparing that // to null pointer so its greater-or-equal @@ -1573,7 +1573,7 @@ return ICmpInst::ICMP_EQ; } // Otherwise, we can't really say if the first operand is null or not. - } else if (const GlobalValue *GV2 = dyn_cast(V2)) { + } else if (const auto *GV2 = dyn_cast(V2)) { if (isa(CE1Op0)) { if (GV2->hasExternalWeakLinkage()) // Weak linkage GVals could be zero or not. We're comparing it to @@ -1583,7 +1583,7 @@ // If its not weak linkage, the GVal must have a non-zero address // so the result is less-than return isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; - } else if (const GlobalValue *GV = dyn_cast(CE1Op0)) { + } else if (const auto *GV = dyn_cast(CE1Op0)) { if (GV == GV2) { // If this is a getelementptr of the same global, then it must be // different. Because the types must match, the getelementptr could @@ -1600,7 +1600,7 @@ } } } else { - ConstantExpr *CE2 = cast(V2); + auto *CE2 = cast(V2); Constant *CE2Op0 = CE2->getOperand(0); // There are MANY other foldings that we could perform here. 
They will @@ -1613,7 +1613,7 @@ if (isa(CE1Op0) && isa(CE2Op0)) { // Don't know relative ordering, but check for inequality. if (CE1Op0 != CE2Op0) { - GEPOperator *CE2GEP = cast(CE2); + auto *CE2GEP = cast(CE2); if (CE1GEP->hasAllZeroIndices() && CE2GEP->hasAllZeroIndices()) return areGlobalsPotentiallyEqual(cast(CE1Op0), cast(CE2Op0)); @@ -1676,7 +1676,7 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred, Constant *C1, Constant *C2) { Type *ResultTy; - if (VectorType *VT = dyn_cast(C1->getType())) + if (auto *VT = dyn_cast(C1->getType())) ResultTy = VectorType::get(Type::getInt1Ty(C1->getContext()), VT->getNumElements()); else @@ -1711,7 +1711,7 @@ // icmp eq/ne(null,GV) -> false/true if (C1->isNullValue()) { - if (const GlobalValue *GV = dyn_cast(C2)) + if (const auto *GV = dyn_cast(C2)) // Don't try to evaluate aliases. External weak GV can be null. if (!isa(GV) && !GV->hasExternalWeakLinkage()) { if (pred == ICmpInst::ICMP_EQ) @@ -1721,7 +1721,7 @@ } // icmp eq/ne(GV,null) -> false/true } else if (C2->isNullValue()) { - if (const GlobalValue *GV = dyn_cast(C1)) + if (const auto *GV = dyn_cast(C1)) // Don't try to evaluate aliases. External weak GV can be null. if (!isa(GV) && !GV->hasExternalWeakLinkage()) { if (pred == ICmpInst::ICMP_EQ) @@ -1957,7 +1957,7 @@ // If the right hand side is a bitcast, try using its inverse to simplify // it by moving it to the left hand side. We can't do this if it would turn // a vector compare into a scalar compare or visa versa. - if (ConstantExpr *CE2 = dyn_cast(C2)) { + if (auto *CE2 = dyn_cast(C2)) { Constant *CE2Op0 = CE2->getOperand(0); if (CE2->getOpcode() == Instruction::BitCast && CE2->getType()->isVectorTy() == CE2Op0->getType()->isVectorTy()) { @@ -1967,7 +1967,7 @@ } // If the left hand side is an extension, try eliminating it. - if (ConstantExpr *CE1 = dyn_cast(C1)) { + if (auto *CE1 = dyn_cast(C1)) { if ((CE1->getOpcode() == Instruction::SExt && ICmpInst::isSigned((ICmpInst::Predicate)pred)) || (CE1->getOpcode() == Instruction::ZExt && @@ -2037,7 +2037,7 @@ Optional InRangeIndex, ArrayRef Idxs) { if (Idxs.empty()) return C; - Constant *Idx0 = cast(Idxs[0]); + auto *Idx0 = cast(Idxs[0]); if ((Idxs.size() == 1 && Idx0->isNullValue())) return C; @@ -2055,18 +2055,18 @@ break; } if (isNull) { - PointerType *PtrTy = cast(C->getType()->getScalarType()); + auto *PtrTy = cast(C->getType()->getScalarType()); Type *Ty = GetElementPtrInst::getIndexedType(PointeeTy, Idxs); assert(Ty && "Invalid indices for GEP!"); Type *GEPTy = PointerType::get(Ty, PtrTy->getAddressSpace()); - if (VectorType *VT = dyn_cast(C->getType())) + if (auto *VT = dyn_cast(C->getType())) GEPTy = VectorType::get(GEPTy, VT->getNumElements()); return Constant::getNullValue(GEPTy); } } - if (ConstantExpr *CE = dyn_cast(C)) { + if (auto *CE = dyn_cast(C)) { // Combine Indices - If the source pointer to this getelementptr instruction // is a getelementptr instruction, combine the indices of the two // getelementptr instructions into a single instruction. @@ -2099,7 +2099,7 @@ if (Idx0->isNullValue()) PerformFold = true; else if (LastI.isSequential()) - if (ConstantInt *CI = dyn_cast(Idx0)) + if (auto *CI = dyn_cast(Idx0)) PerformFold = !LastI.isBoundedSequential() || isIndexInRangeOfArrayType(LastI.getSequentialNumElements(), CI); @@ -2159,13 +2159,13 @@ // // Don't fold if the cast is changing address spaces. 
if (CE->isCast() && Idxs.size() > 1 && Idx0->isNullValue()) { - PointerType *SrcPtrTy = + auto *SrcPtrTy = dyn_cast(CE->getOperand(0)->getType()); - PointerType *DstPtrTy = dyn_cast(CE->getType()); + auto *DstPtrTy = dyn_cast(CE->getType()); if (SrcPtrTy && DstPtrTy) { - ArrayType *SrcArrayTy = + auto *SrcArrayTy = dyn_cast(SrcPtrTy->getElementType()); - ArrayType *DstArrayTy = + auto *DstArrayTy = dyn_cast(DstPtrTy->getElementType()); if (SrcArrayTy && DstArrayTy && SrcArrayTy->getElementType() == DstArrayTy->getElementType() @@ -2231,7 +2231,7 @@ ConstantInt *Factor = ConstantInt::get(CI->getType(), NumElements); NewIdxs[i] = ConstantExpr::getSRem(CI, Factor); - Constant *PrevIdx = cast(Idxs[i - 1]); + auto *PrevIdx = cast(Idxs[i - 1]); Constant *Div = ConstantExpr::getSDiv(CI, Factor); unsigned CommonExtendedWidth = Index: lib/IR/Constants.cpp =================================================================== --- lib/IR/Constants.cpp +++ lib/IR/Constants.cpp @@ -43,17 +43,17 @@ bool Constant::isNegativeZeroValue() const { // Floating point values have an explicit -0.0 value. - if (const ConstantFP *CFP = dyn_cast(this)) + if (const auto *CFP = dyn_cast(this)) return CFP->isZero() && CFP->isNegative(); // Equivalent for a vector of -0.0's. - if (const ConstantDataVector *CV = dyn_cast(this)) - if (ConstantFP *SplatCFP = dyn_cast_or_null(CV->getSplatValue())) + if (const auto *CV = dyn_cast(this)) + if (auto *SplatCFP = dyn_cast_or_null(CV->getSplatValue())) if (SplatCFP && SplatCFP->isZero() && SplatCFP->isNegative()) return true; - if (const ConstantVector *CV = dyn_cast(this)) - if (ConstantFP *SplatCFP = dyn_cast_or_null(CV->getSplatValue())) + if (const auto *CV = dyn_cast(this)) + if (auto *SplatCFP = dyn_cast_or_null(CV->getSplatValue())) if (SplatCFP && SplatCFP->isZero() && SplatCFP->isNegative()) return true; @@ -69,17 +69,17 @@ // zero (floating point), or a null value. bool Constant::isZeroValue() const { // Floating point values have an explicit -0.0 value. - if (const ConstantFP *CFP = dyn_cast(this)) + if (const auto *CFP = dyn_cast(this)) return CFP->isZero(); // Equivalent for a vector of -0.0's. - if (const ConstantDataVector *CV = dyn_cast(this)) - if (ConstantFP *SplatCFP = dyn_cast_or_null(CV->getSplatValue())) + if (const auto *CV = dyn_cast(this)) + if (auto *SplatCFP = dyn_cast_or_null(CV->getSplatValue())) if (SplatCFP && SplatCFP->isZero()) return true; - if (const ConstantVector *CV = dyn_cast(this)) - if (ConstantFP *SplatCFP = dyn_cast_or_null(CV->getSplatValue())) + if (const auto *CV = dyn_cast(this)) + if (auto *SplatCFP = dyn_cast_or_null(CV->getSplatValue())) if (SplatCFP && SplatCFP->isZero()) return true; @@ -89,11 +89,11 @@ bool Constant::isNullValue() const { // 0 is null. - if (const ConstantInt *CI = dyn_cast(this)) + if (const auto *CI = dyn_cast(this)) return CI->isZero(); // +0.0 is null. - if (const ConstantFP *CFP = dyn_cast(this)) + if (const auto *CFP = dyn_cast(this)) return CFP->isZero() && !CFP->isNegative(); // constant zero is zero for aggregates, cpnull is null for pointers, none for @@ -104,20 +104,20 @@ bool Constant::isAllOnesValue() const { // Check for -1 integers - if (const ConstantInt *CI = dyn_cast(this)) + if (const auto *CI = dyn_cast(this)) return CI->isMinusOne(); // Check for FP which are bitcasted from -1 integers - if (const ConstantFP *CFP = dyn_cast(this)) + if (const auto *CFP = dyn_cast(this)) return CFP->getValueAPF().bitcastToAPInt().isAllOnesValue(); // Check for constant vectors which are splats of -1 values. 
- if (const ConstantVector *CV = dyn_cast(this)) + if (const auto *CV = dyn_cast(this)) if (Constant *Splat = CV->getSplatValue()) return Splat->isAllOnesValue(); // Check for constant vectors which are splats of -1 values. - if (const ConstantDataVector *CV = dyn_cast(this)) + if (const auto *CV = dyn_cast(this)) if (Constant *Splat = CV->getSplatValue()) return Splat->isAllOnesValue(); @@ -126,20 +126,20 @@ bool Constant::isOneValue() const { // Check for 1 integers - if (const ConstantInt *CI = dyn_cast(this)) + if (const auto *CI = dyn_cast(this)) return CI->isOne(); // Check for FP which are bitcasted from 1 integers - if (const ConstantFP *CFP = dyn_cast(this)) + if (const auto *CFP = dyn_cast(this)) return CFP->getValueAPF().bitcastToAPInt() == 1; // Check for constant vectors which are splats of 1 values. - if (const ConstantVector *CV = dyn_cast(this)) + if (const auto *CV = dyn_cast(this)) if (Constant *Splat = CV->getSplatValue()) return Splat->isOneValue(); // Check for constant vectors which are splats of 1 values. - if (const ConstantDataVector *CV = dyn_cast(this)) + if (const auto *CV = dyn_cast(this)) if (Constant *Splat = CV->getSplatValue()) return Splat->isOneValue(); @@ -148,20 +148,20 @@ bool Constant::isMinSignedValue() const { // Check for INT_MIN integers - if (const ConstantInt *CI = dyn_cast(this)) + if (const auto *CI = dyn_cast(this)) return CI->isMinValue(/*isSigned=*/true); // Check for FP which are bitcasted from INT_MIN integers - if (const ConstantFP *CFP = dyn_cast(this)) + if (const auto *CFP = dyn_cast(this)) return CFP->getValueAPF().bitcastToAPInt().isMinSignedValue(); // Check for constant vectors which are splats of INT_MIN values. - if (const ConstantVector *CV = dyn_cast(this)) + if (const auto *CV = dyn_cast(this)) if (Constant *Splat = CV->getSplatValue()) return Splat->isMinSignedValue(); // Check for constant vectors which are splats of INT_MIN values. - if (const ConstantDataVector *CV = dyn_cast(this)) + if (const auto *CV = dyn_cast(this)) if (Constant *Splat = CV->getSplatValue()) return Splat->isMinSignedValue(); @@ -170,20 +170,20 @@ bool Constant::isNotMinSignedValue() const { // Check for INT_MIN integers - if (const ConstantInt *CI = dyn_cast(this)) + if (const auto *CI = dyn_cast(this)) return !CI->isMinValue(/*isSigned=*/true); // Check for FP which are bitcasted from INT_MIN integers - if (const ConstantFP *CFP = dyn_cast(this)) + if (const auto *CFP = dyn_cast(this)) return !CFP->getValueAPF().bitcastToAPInt().isMinSignedValue(); // Check for constant vectors which are splats of INT_MIN values. - if (const ConstantVector *CV = dyn_cast(this)) + if (const auto *CV = dyn_cast(this)) if (Constant *Splat = CV->getSplatValue()) return Splat->isNotMinSignedValue(); // Check for constant vectors which are splats of INT_MIN values. - if (const ConstantDataVector *CV = dyn_cast(this)) + if (const auto *CV = dyn_cast(this)) if (Constant *Splat = CV->getSplatValue()) return Splat->isNotMinSignedValue(); @@ -236,18 +236,18 @@ Constant *C = ConstantInt::get(Ty->getContext(), V); // Convert an integer to a pointer, if necessary. - if (PointerType *PTy = dyn_cast(ScalarTy)) + if (auto *PTy = dyn_cast(ScalarTy)) C = ConstantExpr::getIntToPtr(C, PTy); // Broadcast a scalar to a vector, if necessary. 
- if (VectorType *VTy = dyn_cast(Ty)) + if (auto *VTy = dyn_cast(Ty)) C = ConstantVector::getSplat(VTy->getNumElements(), C); return C; } Constant *Constant::getAllOnesValue(Type *Ty) { - if (IntegerType *ITy = dyn_cast(Ty)) + if (auto *ITy = dyn_cast(Ty)) return ConstantInt::get(Ty->getContext(), APInt::getAllOnesValue(ITy->getBitWidth())); @@ -257,22 +257,22 @@ return ConstantFP::get(Ty->getContext(), FL); } - VectorType *VTy = cast(Ty); + auto *VTy = cast(Ty); return ConstantVector::getSplat(VTy->getNumElements(), getAllOnesValue(VTy->getElementType())); } Constant *Constant::getAggregateElement(unsigned Elt) const { - if (const ConstantAggregate *CC = dyn_cast(this)) + if (const auto *CC = dyn_cast(this)) return Elt < CC->getNumOperands() ? CC->getOperand(Elt) : nullptr; - if (const ConstantAggregateZero *CAZ = dyn_cast(this)) + if (const auto *CAZ = dyn_cast(this)) return Elt < CAZ->getNumElements() ? CAZ->getElementValue(Elt) : nullptr; - if (const UndefValue *UV = dyn_cast(this)) + if (const auto *UV = dyn_cast(this)) return Elt < UV->getNumElements() ? UV->getElementValue(Elt) : nullptr; - if (const ConstantDataSequential *CDS =dyn_cast(this)) + if (const auto *CDS =dyn_cast(this)) return Elt < CDS->getNumElements() ? CDS->getElementAsConstant(Elt) : nullptr; return nullptr; @@ -280,7 +280,7 @@ Constant *Constant::getAggregateElement(Constant *Elt) const { assert(isa(Elt->getType()) && "Index must be an integer"); - if (ConstantInt *CI = dyn_cast(Elt)) + if (auto *CI = dyn_cast(Elt)) return getAggregateElement(CI->getZExtValue()); return nullptr; } @@ -329,13 +329,13 @@ SmallPtrSetImpl &NonTrappingOps) { assert(C->getType()->isFirstClassType() && "Cannot evaluate aggregate vals!"); // The only thing that could possibly trap are constant exprs. - const ConstantExpr *CE = dyn_cast(C); + const auto *CE = dyn_cast(C); if (!CE) return false; // ConstantExpr traps if any operands can trap. for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) { - if (ConstantExpr *Op = dyn_cast(CE->getOperand(i))) { + if (auto *Op = dyn_cast(CE->getOperand(i))) { if (NonTrappingOps.insert(Op).second && canTrapImpl(Op, NonTrappingOps)) return true; } @@ -376,7 +376,7 @@ if (Predicate(GV)) return true; for (const Value *Op : WorkItem->operands()) { - const Constant *ConstOp = dyn_cast(Op); + const auto *ConstOp = dyn_cast(Op); if (!ConstOp) continue; if (Visited.insert(ConstOp).second) @@ -402,7 +402,7 @@ bool Constant::isConstantUsed() const { for (const User *U : users()) { - const Constant *UC = dyn_cast(U); + const auto *UC = dyn_cast(U); if (!UC || isa(UC)) return true; @@ -416,17 +416,17 @@ if (isa(this)) return true; // Global reference. - if (const BlockAddress *BA = dyn_cast(this)) + if (const auto *BA = dyn_cast(this)) return BA->getFunction()->needsRelocation(); // While raw uses of blockaddress need to be relocated, differences between // two of them don't when they are for labels in the same function. This is a // common idiom when creating a table for the indirect goto extension, so we // handle it efficiently here. 
- if (const ConstantExpr *CE = dyn_cast(this)) + if (const auto *CE = dyn_cast(this)) if (CE->getOpcode() == Instruction::Sub) { - ConstantExpr *LHS = dyn_cast(CE->getOperand(0)); - ConstantExpr *RHS = dyn_cast(CE->getOperand(1)); + auto *LHS = dyn_cast(CE->getOperand(0)); + auto *RHS = dyn_cast(CE->getOperand(1)); if (LHS && RHS && LHS->getOpcode() == Instruction::PtrToInt && RHS->getOpcode() == Instruction::PtrToInt && isa(LHS->getOperand(0)) && @@ -449,7 +449,7 @@ if (isa(C)) return false; // Cannot remove this while (!C->use_empty()) { - const Constant *User = dyn_cast(C->user_back()); + const auto *User = dyn_cast(C->user_back()); if (!User) return false; // Non-constant usage; if (!removeDeadUsersOfConstant(User)) return false; // Constant wasn't dead @@ -464,7 +464,7 @@ Value::const_user_iterator I = user_begin(), E = user_end(); Value::const_user_iterator LastNonDeadUser = E; while (I != E) { - const Constant *User = dyn_cast(*I); + const auto *User = dyn_cast(*I); if (!User) { LastNonDeadUser = I; ++I; @@ -518,7 +518,7 @@ } Constant *ConstantInt::getTrue(Type *Ty) { - VectorType *VTy = dyn_cast(Ty); + auto *VTy = dyn_cast(Ty); if (!VTy) { assert(Ty->isIntegerTy(1) && "True must be i1 or vector of i1."); return ConstantInt::getTrue(Ty->getContext()); @@ -530,7 +530,7 @@ } Constant *ConstantInt::getFalse(Type *Ty) { - VectorType *VTy = dyn_cast(Ty); + auto *VTy = dyn_cast(Ty); if (!VTy) { assert(Ty->isIntegerTy(1) && "False must be i1 or vector of i1."); return ConstantInt::getFalse(Ty->getContext()); @@ -559,7 +559,7 @@ Constant *C = get(cast(Ty->getScalarType()), V, isSigned); // For vectors, broadcast the value. - if (VectorType *VTy = dyn_cast(Ty)) + if (auto *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getNumElements(), C); return C; @@ -583,7 +583,7 @@ "ConstantInt type doesn't match the type implied by its value!"); // For vectors, broadcast the value. - if (VectorType *VTy = dyn_cast(Ty)) + if (auto *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getNumElements(), C); return C; @@ -630,7 +630,7 @@ Constant *C = get(Context, FV); // For vectors, broadcast the value. - if (VectorType *VTy = dyn_cast(Ty)) + if (auto *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getNumElements(), C); return C; @@ -644,7 +644,7 @@ Constant *C = get(Context, FV); // For vectors, broadcast the value. - if (VectorType *VTy = dyn_cast(Ty)) + if (auto *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getNumElements(), C); return C; @@ -655,7 +655,7 @@ APFloat NaN = APFloat::getNaN(Semantics, Negative, Type); Constant *C = get(Ty->getContext(), NaN); - if (VectorType *VTy = dyn_cast(Ty)) + if (auto *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getNumElements(), C); return C; @@ -666,7 +666,7 @@ APFloat NegZero = APFloat::getZero(Semantics, /*Negative=*/true); Constant *C = get(Ty->getContext(), NegZero); - if (VectorType *VTy = dyn_cast(Ty)) + if (auto *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getNumElements(), C); return C; @@ -714,7 +714,7 @@ const fltSemantics &Semantics = *TypeToFloatSemantics(Ty->getScalarType()); Constant *C = get(Ty->getContext(), APFloat::getInf(Semantics, Negative)); - if (VectorType *VTy = dyn_cast(Ty)) + if (auto *VTy = dyn_cast(Ty)) return ConstantVector::getSplat(VTy->getNumElements(), C); return C; @@ -843,7 +843,7 @@ // We speculatively build the elements here even if it turns out that there is // a constantexpr or something else weird, since it is so uncommon for that to // happen. 
- if (ConstantInt *CI = dyn_cast(C)) { + if (auto *CI = dyn_cast(C)) { if (CI->getType()->isIntegerTy(8)) return getIntSequenceIfElementsMatch(V); else if (CI->getType()->isIntegerTy(16)) @@ -852,7 +852,7 @@ return getIntSequenceIfElementsMatch(V); else if (CI->getType()->isIntegerTy(64)) return getIntSequenceIfElementsMatch(V); - } else if (ConstantFP *CFP = dyn_cast(C)) { + } else if (auto *CFP = dyn_cast(C)) { if (CFP->getType()->isHalfTy()) return getFPSequenceIfElementsMatch(V); else if (CFP->getType()->isFloatTy()) @@ -1074,7 +1074,7 @@ // The remaining indices must be compile-time known integers within the // bounds of the corresponding notional static array types. for (; GEPI != E; ++GEPI, ++OI) { - ConstantInt *CI = dyn_cast(*OI); + auto *CI = dyn_cast(*OI); if (GEPI.isBoundedSequential() && (CI->getValue().getActiveBits() > 64 || CI->getZExtValue() >= GEPI.getSequentialNumElements())) @@ -1091,7 +1091,7 @@ } ArrayRef ConstantExpr::getIndices() const { - if (const ExtractValueConstantExpr *EVCE = + if (const auto *EVCE = dyn_cast(this)) return EVCE->Indices; @@ -1289,9 +1289,9 @@ assert(this->getType()->isVectorTy() && "Only valid for vectors!"); if (isa(this)) return getNullValue(this->getType()->getVectorElementType()); - if (const ConstantDataVector *CV = dyn_cast(this)) + if (const auto *CV = dyn_cast(this)) return CV->getSplatValue(); - if (const ConstantVector *CV = dyn_cast(this)) + if (const auto *CV = dyn_cast(this)) return CV->getSplatValue(); return nullptr; } @@ -1307,7 +1307,7 @@ } const APInt &Constant::getUniqueInteger() const { - if (const ConstantInt *CI = dyn_cast(this)) + if (const auto *CI = dyn_cast(this)) return CI->getValue(); assert(this->getSplatValue() && "Doesn't contain a unique integer!"); const Constant *C = this->getAggregateElement(0U); @@ -1709,12 +1709,12 @@ // Canonicalize addrspacecasts between different pointer types by first // bitcasting the pointer type and then converting the address space. - PointerType *SrcScalarTy = cast(C->getType()->getScalarType()); - PointerType *DstScalarTy = cast(DstTy->getScalarType()); + auto *SrcScalarTy = cast(C->getType()->getScalarType()); + auto *DstScalarTy = cast(DstTy->getScalarType()); Type *DstElemTy = DstScalarTy->getElementType(); if (SrcScalarTy->getElementType() != DstElemTy) { Type *MidTy = PointerType::get(DstElemTy, SrcScalarTy->getAddressSpace()); - if (VectorType *VT = dyn_cast(DstTy)) { + if (auto *VT = dyn_cast(DstTy)) { // Handle vectors of pointers. 
MidTy = VectorType::get(MidTy, VT->getNumElements()); } @@ -1927,7 +1927,7 @@ Idxs[i]->getType()->getVectorNumElements() == NumVecElts) && "getelementptr index type missmatch"); - Constant *Idx = cast(Idxs[i]); + auto *Idx = cast(Idxs[i]); if (NumVecElts && !Idxs[i]->getType()->isVectorTy()) Idx = ConstantVector::getSplat(NumVecElts, Idx); ArgVec.push_back(Idx); @@ -1961,7 +1961,7 @@ const ConstantExprKeyType Key(Instruction::ICmp, ArgVec, pred); Type *ResultTy = Type::getInt1Ty(LHS->getContext()); - if (VectorType *VT = dyn_cast(LHS->getType())) + if (auto *VT = dyn_cast(LHS->getType())) ResultTy = VectorType::get(ResultTy, VT->getNumElements()); LLVMContextImpl *pImpl = LHS->getType()->getContext().pImpl; @@ -1985,7 +1985,7 @@ const ConstantExprKeyType Key(Instruction::FCmp, ArgVec, pred); Type *ResultTy = Type::getInt1Ty(LHS->getContext()); - if (VectorType *VT = dyn_cast(LHS->getType())) + if (auto *VT = dyn_cast(LHS->getType())) ResultTy = VectorType::get(ResultTy, VT->getNumElements()); LLVMContextImpl *pImpl = LHS->getType()->getContext().pImpl; @@ -2310,7 +2310,7 @@ } unsigned ConstantDataSequential::getNumElements() const { - if (ArrayType *AT = dyn_cast(getType())) + if (auto *AT = dyn_cast(getType())) return AT->getNumElements(); return getType()->getVectorNumElements(); } @@ -2541,7 +2541,7 @@ Constant *ConstantDataVector::getSplat(unsigned NumElts, Constant *V) { assert(isElementTypeCompatible(V->getType()) && "Element type not compatible with ConstantData"); - if (ConstantInt *CI = dyn_cast(V)) { + if (auto *CI = dyn_cast(V)) { if (CI->getType()->isIntegerTy(8)) { SmallVector Elts(NumElts, CI->getZExtValue()); return get(V->getContext(), Elts); @@ -2559,7 +2559,7 @@ return get(V->getContext(), Elts); } - if (ConstantFP *CFP = dyn_cast(V)) { + if (auto *CFP = dyn_cast(V)) { if (CFP->getType()->isHalfTy()) { SmallVector Elts( NumElts, CFP->getValueAPF().bitcastToAPInt().getLimitedValue()); @@ -2717,7 +2717,7 @@ Value *ConstantArray::handleOperandChangeImpl(Value *From, Value *To) { assert(isa(To) && "Cannot make Constant refer to non-constant!"); - Constant *ToC = cast(To); + auto *ToC = cast(To); SmallVector Values; Values.reserve(getNumOperands()); // Build replacement array. @@ -2731,7 +2731,7 @@ Use *OperandList = getOperandList(); unsigned OperandNo = 0; for (Use *O = OperandList, *E = OperandList+getNumOperands(); O != E; ++O) { - Constant *Val = cast(O->get()); + auto *Val = cast(O->get()); if (Val == From) { OperandNo = (O - OperandList); Val = ToC; @@ -2758,7 +2758,7 @@ Value *ConstantStruct::handleOperandChangeImpl(Value *From, Value *To) { assert(isa(To) && "Cannot make Constant refer to non-constant!"); - Constant *ToC = cast(To); + auto *ToC = cast(To); Use *OperandList = getOperandList(); @@ -2771,7 +2771,7 @@ bool AllSame = true; unsigned OperandNo = 0; for (Use *O = OperandList, *E = OperandList + getNumOperands(); O != E; ++O) { - Constant *Val = cast(O->get()); + auto *Val = cast(O->get()); if (Val == From) { OperandNo = (O - OperandList); Val = ToC; @@ -2794,7 +2794,7 @@ Value *ConstantVector::handleOperandChangeImpl(Value *From, Value *To) { assert(isa(To) && "Cannot make Constant refer to non-constant!"); - Constant *ToC = cast(To); + auto *ToC = cast(To); SmallVector Values; Values.reserve(getNumOperands()); // Build replacement array... 
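The C-API hunks further down (LLVMGetAlignment, LLVMSetAlignment, the predicate getters, and the atomic accessors) all share the dispatch-chain pattern: try each concrete type with dyn_cast, and finish with a plain cast, which asserts, once every remaining case is known. A rough sketch of that shape, assuming the unsigned getAlignment() accessors of the API as it stood when this patch was written:

#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Hypothetical helper mirroring the C-API getters, not code from the patch.
static unsigned alignmentOf(Value *P) {
  if (auto *GV = dyn_cast<GlobalValue>(P))
    return GV->getAlignment();
  if (auto *AI = dyn_cast<AllocaInst>(P))
    return AI->getAlignment();
  if (auto *LI = dyn_cast<LoadInst>(P))
    return LI->getAlignment();
  // Anything else must be a store; cast<> asserts if it is not.
  return cast<StoreInst>(P)->getAlignment();
}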
@@ -2820,7 +2820,7 @@ Value *ConstantExpr::handleOperandChangeImpl(Value *From, Value *ToV) { assert(isa(ToV) && "Cannot make Constant refer to non-constant!"); - Constant *To = cast(ToV); + auto *To = cast(ToV); SmallVector NewOps; unsigned NumUpdated = 0; Index: lib/IR/Core.cpp =================================================================== --- lib/IR/Core.cpp +++ lib/IR/Core.cpp @@ -682,7 +682,7 @@ assert((isa(MD) || isa(MD)) && "Expected a metadata node or a canonicalized constant"); - if (MDNode *N = dyn_cast(MD)) + if (auto *N = dyn_cast(MD)) return N; return MDNode::get(MAV->getContext(), MD); @@ -804,7 +804,7 @@ } LLVMBool LLVMIsNull(LLVMValueRef Val) { - if (Constant *C = dyn_cast(unwrap(Val))) + if (auto *C = dyn_cast(unwrap(Val))) return C->isNullValue(); return false; } @@ -863,7 +863,7 @@ const char *LLVMGetMDString(LLVMValueRef V, unsigned *Length) { if (const auto *MD = dyn_cast(unwrap(V))) - if (const MDString *S = dyn_cast(MD->getMetadata())) { + if (const auto *S = dyn_cast(MD->getMetadata())) { *Length = S->getString().size(); return S->getString().data(); } @@ -1043,7 +1043,7 @@ LLVMValueRef *ConstantVals, unsigned Count) { Constant **Elements = unwrap(ConstantVals, Count); - StructType *Ty = cast(unwrap(StructTy)); + auto *Ty = cast(unwrap(StructTy)); return wrap(ConstantStruct::get(Ty, makeArrayRef(Elements, Count))); } @@ -1581,13 +1581,13 @@ unsigned LLVMGetAlignment(LLVMValueRef V) { Value *P = unwrap(V); - if (GlobalValue *GV = dyn_cast(P)) + if (auto *GV = dyn_cast(P)) return GV->getAlignment(); - if (AllocaInst *AI = dyn_cast(P)) + if (auto *AI = dyn_cast(P)) return AI->getAlignment(); - if (LoadInst *LI = dyn_cast(P)) + if (auto *LI = dyn_cast(P)) return LI->getAlignment(); - if (StoreInst *SI = dyn_cast(P)) + if (auto *SI = dyn_cast(P)) return SI->getAlignment(); llvm_unreachable( @@ -1596,13 +1596,13 @@ void LLVMSetAlignment(LLVMValueRef V, unsigned Bytes) { Value *P = unwrap(V); - if (GlobalObject *GV = dyn_cast(P)) + if (auto *GV = dyn_cast(P)) GV->setAlignment(Bytes); - else if (AllocaInst *AI = dyn_cast(P)) + else if (auto *AI = dyn_cast(P)) AI->setAlignment(Bytes); - else if (LoadInst *LI = dyn_cast(P)) + else if (auto *LI = dyn_cast(P)) LI->setAlignment(Bytes); - else if (StoreInst *SI = dyn_cast(P)) + else if (auto *SI = dyn_cast(P)) SI->setAlignment(Bytes); else llvm_unreachable( @@ -1811,7 +1811,7 @@ } unsigned LLVMGetIntrinsicID(LLVMValueRef Fn) { - if (Function *F = dyn_cast(unwrap(Fn))) + if (auto *F = dyn_cast(unwrap(Fn))) return F->getIntrinsicID(); return 0; } @@ -2116,31 +2116,31 @@ } LLVMIntPredicate LLVMGetICmpPredicate(LLVMValueRef Inst) { - if (ICmpInst *I = dyn_cast(unwrap(Inst))) + if (auto *I = dyn_cast(unwrap(Inst))) return (LLVMIntPredicate)I->getPredicate(); - if (ConstantExpr *CE = dyn_cast(unwrap(Inst))) + if (auto *CE = dyn_cast(unwrap(Inst))) if (CE->getOpcode() == Instruction::ICmp) return (LLVMIntPredicate)CE->getPredicate(); return (LLVMIntPredicate)0; } LLVMRealPredicate LLVMGetFCmpPredicate(LLVMValueRef Inst) { - if (FCmpInst *I = dyn_cast(unwrap(Inst))) + if (auto *I = dyn_cast(unwrap(Inst))) return (LLVMRealPredicate)I->getPredicate(); - if (ConstantExpr *CE = dyn_cast(unwrap(Inst))) + if (auto *CE = dyn_cast(unwrap(Inst))) if (CE->getOpcode() == Instruction::FCmp) return (LLVMRealPredicate)CE->getPredicate(); return (LLVMRealPredicate)0; } LLVMOpcode LLVMGetInstructionOpcode(LLVMValueRef Inst) { - if (Instruction *C = dyn_cast(unwrap(Inst))) + if (auto *C = dyn_cast(unwrap(Inst))) return 
map_to_llvmopcode(C->getOpcode()); return (LLVMOpcode)0; } LLVMValueRef LLVMInstructionClone(LLVMValueRef Inst) { - if (Instruction *C = dyn_cast(unwrap(Inst))) + if (auto *C = dyn_cast(unwrap(Inst))) return wrap(C->clone()); return nullptr; } @@ -2790,14 +2790,14 @@ LLVMBool LLVMGetVolatile(LLVMValueRef MemAccessInst) { Value *P = unwrap(MemAccessInst); - if (LoadInst *LI = dyn_cast(P)) + if (auto *LI = dyn_cast(P)) return LI->isVolatile(); return cast(P)->isVolatile(); } void LLVMSetVolatile(LLVMValueRef MemAccessInst, LLVMBool isVolatile) { Value *P = unwrap(MemAccessInst); - if (LoadInst *LI = dyn_cast(P)) + if (auto *LI = dyn_cast(P)) return LI->setVolatile(isVolatile); return cast(P)->setVolatile(isVolatile); } @@ -2805,7 +2805,7 @@ LLVMAtomicOrdering LLVMGetOrdering(LLVMValueRef MemAccessInst) { Value *P = unwrap(MemAccessInst); AtomicOrdering O; - if (LoadInst *LI = dyn_cast(P)) + if (auto *LI = dyn_cast(P)) O = LI->getOrdering(); else O = cast(P)->getOrdering(); @@ -2816,7 +2816,7 @@ Value *P = unwrap(MemAccessInst); AtomicOrdering O = mapFromLLVMOrdering(Ordering); - if (LoadInst *LI = dyn_cast(P)) + if (auto *LI = dyn_cast(P)) return LI->setOrdering(O); return cast(P)->setOrdering(O); } @@ -3055,7 +3055,7 @@ LLVMBool LLVMIsAtomicSingleThread(LLVMValueRef AtomicInst) { Value *P = unwrap(AtomicInst); - if (AtomicRMWInst *I = dyn_cast(P)) + if (auto *I = dyn_cast(P)) return I->getSynchScope() == SingleThread; return cast(P)->getSynchScope() == SingleThread; } @@ -3064,7 +3064,7 @@ Value *P = unwrap(AtomicInst); SynchronizationScope Sync = NewValue ? SingleThread : CrossThread; - if (AtomicRMWInst *I = dyn_cast(P)) + if (auto *I = dyn_cast(P)) return I->setSynchScope(Sync); return cast(P)->setSynchScope(Sync); } Index: lib/IR/DataLayout.cpp =================================================================== --- lib/IR/DataLayout.cpp +++ lib/IR/DataLayout.cpp @@ -716,7 +716,7 @@ "Expected a pointer or pointer vector type."); unsigned NumBits = getPointerTypeSizeInBits(Ty); IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits); - if (VectorType *VecTy = dyn_cast(Ty)) + if (auto *VecTy = dyn_cast(Ty)) return VectorType::get(IntTy, VecTy->getNumElements()); return IntTy; } Index: lib/IR/Dominators.cpp =================================================================== --- lib/IR/Dominators.cpp +++ lib/IR/Dominators.cpp @@ -198,9 +198,9 @@ assert(BBE.isSingleEdge() && "This function is not efficient in handling multiple edges"); - Instruction *UserInst = cast(U.getUser()); + auto *UserInst = cast(U.getUser()); // A PHI in the end of the edge is dominated by it. - PHINode *PN = dyn_cast(UserInst); + auto *PN = dyn_cast(UserInst); if (PN && PN->getParent() == BBE.getEnd() && PN->getIncomingBlock(U) == BBE.getStart()) return true; @@ -216,14 +216,14 @@ } bool DominatorTree::dominates(const Instruction *Def, const Use &U) const { - Instruction *UserInst = cast(U.getUser()); + auto *UserInst = cast(U.getUser()); const BasicBlock *DefBB = Def->getParent(); // Determine the block in which the use happens. PHI nodes use // their operands on edges; simulate this by thinking of the use // happening at the end of the predecessor block. const BasicBlock *UseBB; - if (PHINode *PN = dyn_cast(UserInst)) + if (auto *PN = dyn_cast(UserInst)) UseBB = PN->getIncomingBlock(U); else UseBB = UserInst->getParent(); @@ -241,7 +241,7 @@ // Among other things, this means they don't dominate anything in // their own block, except possibly a phi, so we don't need to // walk the block in any case. 
- if (const InvokeInst *II = dyn_cast(Def)) { + if (const auto *II = dyn_cast(Def)) { BasicBlock *NormalDest = II->getNormalDest(); BasicBlockEdge E(DefBB, NormalDest); return dominates(E, U); @@ -267,14 +267,14 @@ } bool DominatorTree::isReachableFromEntry(const Use &U) const { - Instruction *I = dyn_cast(U.getUser()); + auto *I = dyn_cast(U.getUser()); // ConstantExprs aren't really reachable from the entry block, but they // don't need to be treated like unreachable code either. if (!I) return true; // PHI nodes use their operands on their incoming edges. - if (PHINode *PN = dyn_cast(I)) + if (auto *PN = dyn_cast(I)) return isReachableFromEntry(PN->getIncomingBlock(U)); // Everything else uses their operands in their own block. Index: lib/IR/Function.cpp =================================================================== --- lib/IR/Function.cpp +++ lib/IR/Function.cpp @@ -429,7 +429,7 @@ /// the Function Src to this one. void Function::copyAttributesFrom(const GlobalValue *Src) { GlobalObject::copyAttributesFrom(Src); - const Function *SrcF = dyn_cast(Src); + const auto *SrcF = dyn_cast(Src); if (!SrcF) return; @@ -524,16 +524,16 @@ /// everything. static std::string getMangledTypeStr(Type* Ty) { std::string Result; - if (PointerType* PTyp = dyn_cast(Ty)) { + if (auto * PTyp = dyn_cast(Ty)) { Result += "p" + llvm::utostr(PTyp->getAddressSpace()) + getMangledTypeStr(PTyp->getElementType()); - } else if (ArrayType* ATyp = dyn_cast(Ty)) { + } else if (auto * ATyp = dyn_cast(Ty)) { Result += "a" + llvm::utostr(ATyp->getNumElements()) + getMangledTypeStr(ATyp->getElementType()); - } else if (StructType* STyp = dyn_cast(Ty)) { + } else if (auto * STyp = dyn_cast(Ty)) { assert(!STyp->isLiteral() && "TODO: implement literal types"); Result += STyp->getName(); - } else if (FunctionType* FT = dyn_cast(Ty)) { + } else if (auto * FT = dyn_cast(Ty)) { Result += "f_" + getMangledTypeStr(FT->getReturnType()); for (size_t i = 0; i < FT->getNumParams(); i++) Result += getMangledTypeStr(FT->getParamType(i)); @@ -847,17 +847,17 @@ return Tys[D.getArgumentNumber()]; case IITDescriptor::ExtendArgument: { Type *Ty = Tys[D.getArgumentNumber()]; - if (VectorType *VTy = dyn_cast(Ty)) + if (auto *VTy = dyn_cast(Ty)) return VectorType::getExtendedElementVectorType(VTy); return IntegerType::get(Context, 2 * cast(Ty)->getBitWidth()); } case IITDescriptor::TruncArgument: { Type *Ty = Tys[D.getArgumentNumber()]; - if (VectorType *VTy = dyn_cast(Ty)) + if (auto *VTy = dyn_cast(Ty)) return VectorType::getTruncatedElementVectorType(VTy); - IntegerType *ITy = cast(Ty); + auto *ITy = cast(Ty); assert(ITy->getBitWidth() % 2 == 0); return IntegerType::get(Context, ITy->getBitWidth() / 2); } @@ -867,7 +867,7 @@ case IITDescriptor::SameVecWidthArgument: { Type *EltTy = DecodeFixedType(Infos, Tys, Context); Type *Ty = Tys[D.getArgumentNumber()]; - if (VectorType *VTy = dyn_cast(Ty)) { + if (auto *VTy = dyn_cast(Ty)) { return VectorType::get(EltTy, VTy->getNumElements()); } llvm_unreachable("unhandled"); @@ -878,7 +878,7 @@ } case IITDescriptor::PtrToElt: { Type *Ty = Tys[D.getArgumentNumber()]; - VectorType *VTy = dyn_cast(Ty); + auto *VTy = dyn_cast(Ty); if (!VTy) llvm_unreachable("Expected an argument of Vector Type"); Type *EltTy = VTy->getVectorElementType(); @@ -886,7 +886,7 @@ } case IITDescriptor::VecOfPtrsToElt: { Type *Ty = Tys[D.getArgumentNumber()]; - VectorType *VTy = dyn_cast(Ty); + auto *VTy = dyn_cast(Ty); if (!VTy) llvm_unreachable("Expected an argument of Vector Type"); Type *EltTy = 
VTy->getVectorElementType(); @@ -981,18 +981,18 @@ case IITDescriptor::Double: return !Ty->isDoubleTy(); case IITDescriptor::Integer: return !Ty->isIntegerTy(D.Integer_Width); case IITDescriptor::Vector: { - VectorType *VT = dyn_cast(Ty); + auto *VT = dyn_cast(Ty); return !VT || VT->getNumElements() != D.Vector_Width || matchIntrinsicType(VT->getElementType(), Infos, ArgTys); } case IITDescriptor::Pointer: { - PointerType *PT = dyn_cast(Ty); + auto *PT = dyn_cast(Ty); return !PT || PT->getAddressSpace() != D.Pointer_AddressSpace || matchIntrinsicType(PT->getElementType(), Infos, ArgTys); } case IITDescriptor::Struct: { - StructType *ST = dyn_cast(Ty); + auto *ST = dyn_cast(Ty); if (!ST || ST->getNumElements() != D.Struct_NumElements) return true; @@ -1028,9 +1028,9 @@ return true; Type *NewTy = ArgTys[D.getArgumentNumber()]; - if (VectorType *VTy = dyn_cast(NewTy)) + if (auto *VTy = dyn_cast(NewTy)) NewTy = VectorType::getExtendedElementVectorType(VTy); - else if (IntegerType *ITy = dyn_cast(NewTy)) + else if (auto *ITy = dyn_cast(NewTy)) NewTy = IntegerType::get(ITy->getContext(), 2 * ITy->getBitWidth()); else return true; @@ -1043,9 +1043,9 @@ return true; Type *NewTy = ArgTys[D.getArgumentNumber()]; - if (VectorType *VTy = dyn_cast(NewTy)) + if (auto *VTy = dyn_cast(NewTy)) NewTy = VectorType::getTruncatedElementVectorType(VTy); - else if (IntegerType *ITy = dyn_cast(NewTy)) + else if (auto *ITy = dyn_cast(NewTy)) NewTy = IntegerType::get(ITy->getContext(), ITy->getBitWidth() / 2); else return true; @@ -1061,9 +1061,9 @@ case IITDescriptor::SameVecWidthArgument: { if (D.getArgumentNumber() >= ArgTys.size()) return true; - VectorType * ReferenceType = + auto * ReferenceType = dyn_cast(ArgTys[D.getArgumentNumber()]); - VectorType *ThisArgType = dyn_cast(Ty); + auto *ThisArgType = dyn_cast(Ty); if (!ThisArgType || !ReferenceType || (ReferenceType->getVectorNumElements() != ThisArgType->getVectorNumElements())) @@ -1075,15 +1075,15 @@ if (D.getArgumentNumber() >= ArgTys.size()) return true; Type * ReferenceType = ArgTys[D.getArgumentNumber()]; - PointerType *ThisArgType = dyn_cast(Ty); + auto *ThisArgType = dyn_cast(Ty); return (!ThisArgType || ThisArgType->getElementType() != ReferenceType); } case IITDescriptor::PtrToElt: { if (D.getArgumentNumber() >= ArgTys.size()) return true; - VectorType * ReferenceType = + auto * ReferenceType = dyn_cast (ArgTys[D.getArgumentNumber()]); - PointerType *ThisArgType = dyn_cast(Ty); + auto *ThisArgType = dyn_cast(Ty); return (!ThisArgType || !ReferenceType || ThisArgType->getElementType() != ReferenceType->getElementType()); @@ -1091,14 +1091,14 @@ case IITDescriptor::VecOfPtrsToElt: { if (D.getArgumentNumber() >= ArgTys.size()) return true; - VectorType * ReferenceType = + auto * ReferenceType = dyn_cast (ArgTys[D.getArgumentNumber()]); - VectorType *ThisArgVecTy = dyn_cast(Ty); + auto *ThisArgVecTy = dyn_cast(Ty); if (!ThisArgVecTy || !ReferenceType || (ReferenceType->getVectorNumElements() != ThisArgVecTy->getVectorNumElements())) return true; - PointerType *ThisArgEltTy = + auto *ThisArgEltTy = dyn_cast(ThisArgVecTy->getVectorElementType()); if (!ThisArgEltTy) return true; @@ -1284,7 +1284,7 @@ Optional Function::getEntryCount() const { MDNode *MD = getMetadata(LLVMContext::MD_prof); if (MD && MD->getOperand(0)) - if (MDString *MDS = dyn_cast(MD->getOperand(0))) + if (auto *MDS = dyn_cast(MD->getOperand(0))) if (MDS->getString().equals("function_entry_count")) { ConstantInt *CI = mdconst::extract(MD->getOperand(1)); uint64_t Count = 
CI->getValue().getZExtValue(); Index: lib/IR/Globals.cpp =================================================================== --- lib/IR/Globals.cpp +++ lib/IR/Globals.cpp @@ -31,7 +31,7 @@ //===----------------------------------------------------------------------===// bool GlobalValue::isMaterializable() const { - if (const Function *F = dyn_cast(this)) + if (const auto *F = dyn_cast(this)) return F->isMaterializable(); return false; } @@ -162,11 +162,11 @@ bool GlobalValue::isDeclaration() const { // Globals are definitions if they have an initializer. - if (const GlobalVariable *GV = dyn_cast(this)) + if (const auto *GV = dyn_cast(this)) return GV->getNumOperands() == 0; // Functions are definitions if they have a body. - if (const Function *F = dyn_cast(this)) + if (const auto *F = dyn_cast(this)) return F->empty() && !F->isMaterializable(); // Aliases and ifuncs are always definitions. @@ -320,7 +320,7 @@ /// from the GlobalVariable Src to this one. void GlobalVariable::copyAttributesFrom(const GlobalValue *Src) { GlobalObject::copyAttributesFrom(Src); - if (const GlobalVariable *SrcVar = dyn_cast(Src)) { + if (const auto *SrcVar = dyn_cast(Src)) { setThreadLocalMode(SrcVar->getThreadLocalMode()); setExternallyInitialized(SrcVar->isExternallyInitialized()); } Index: lib/IR/IRBuilder.cpp =================================================================== --- lib/IR/IRBuilder.cpp +++ lib/IR/IRBuilder.cpp @@ -44,7 +44,7 @@ } Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) { - PointerType *PT = cast(Ptr->getType()); + auto *PT = cast(Ptr->getType()); if (PT->getElementType()->isIntegerTy(8)) return Ptr; @@ -232,7 +232,7 @@ CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, unsigned Align, Value *Mask, Value *PassThru, const Twine &Name) { - PointerType *PtrTy = cast(Ptr->getType()); + auto *PtrTy = cast(Ptr->getType()); Type *DataTy = PtrTy->getElementType(); assert(DataTy->isVectorTy() && "Ptr should point to a vector"); if (!PassThru) @@ -251,7 +251,7 @@ /// be accessed in memory CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr, unsigned Align, Value *Mask) { - PointerType *PtrTy = cast(Ptr->getType()); + auto *PtrTy = cast(Ptr->getType()); Type *DataTy = PtrTy->getElementType(); assert(DataTy->isVectorTy() && "Ptr should point to a vector"); Type *OverloadedTypes[] = { DataTy, PtrTy }; @@ -357,7 +357,7 @@ ArrayRef TransitionArgs, ArrayRef DeoptArgs, ArrayRef GCArgs, const Twine &Name) { // Extract out the type of the callee. - PointerType *FuncPtrType = cast(ActualCallee->getType()); + auto *FuncPtrType = cast(ActualCallee->getType()); assert(isa(FuncPtrType->getElementType()) && "actual callee must be a callable value"); @@ -408,7 +408,7 @@ uint32_t Flags, ArrayRef InvokeArgs, ArrayRef TransitionArgs, ArrayRef DeoptArgs, ArrayRef GCArgs, const Twine &Name) { // Extract out the type of the callee. 
Index: lib/IR/InlineAsm.cpp =================================================================== --- lib/IR/InlineAsm.cpp +++ lib/IR/InlineAsm.cpp @@ -284,7 +284,7 @@ if (Ty->getReturnType()->isStructTy()) return false; break; default: - StructType *STy = dyn_cast<StructType>(Ty->getReturnType()); + auto *STy = dyn_cast<StructType>(Ty->getReturnType()); if (!STy || STy->getNumElements() != NumOutputs) return false; break; Index: lib/IR/Instruction.cpp =================================================================== --- lib/IR/Instruction.cpp +++ lib/IR/Instruction.cpp @@ -332,41 +332,41 @@ assert(I1->getOpcode() == I2->getOpcode() && "Can not compare special state of different instructions"); - if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1)) + if (const auto *AI = dyn_cast<AllocaInst>(I1)) return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() && (AI->getAlignment() == cast<AllocaInst>(I2)->getAlignment() || IgnoreAlignment); - if (const LoadInst *LI = dyn_cast<LoadInst>(I1)) + if (const auto *LI = dyn_cast<LoadInst>(I1)) return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() && (LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() || IgnoreAlignment) && LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() && LI->getSynchScope() == cast<LoadInst>(I2)->getSynchScope(); - if (const StoreInst *SI = dyn_cast<StoreInst>(I1)) + if (const auto *SI = dyn_cast<StoreInst>(I1)) return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() && (SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() || IgnoreAlignment) && SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() && SI->getSynchScope() == cast<StoreInst>(I2)->getSynchScope(); - if (const CmpInst *CI = dyn_cast<CmpInst>(I1)) + if (const auto *CI = dyn_cast<CmpInst>(I1)) return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate(); - if (const CallInst *CI = dyn_cast<CallInst>(I1)) + if (const auto *CI = dyn_cast<CallInst>(I1)) return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() && CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() && CI->getAttributes() == cast<CallInst>(I2)->getAttributes() && CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2)); - if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1)) + if (const auto *CI = dyn_cast<InvokeInst>(I1)) return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() && CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() && CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2)); - if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1)) + if (const auto *IVI = dyn_cast<InsertValueInst>(I1)) return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices(); - if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1)) + if (const auto *EVI = dyn_cast<ExtractValueInst>(I1)) return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices(); - if (const FenceInst *FI = dyn_cast<FenceInst>(I1)) + if (const auto *FI = dyn_cast<FenceInst>(I1)) return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() && FI->getSynchScope() == cast<FenceInst>(I2)->getSynchScope(); - if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1)) + if (const auto *CXI = dyn_cast<AtomicCmpXchgInst>(I1)) return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() && CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() && CXI->getSuccessOrdering() == @@ -374,7 +374,7 @@ CXI->getFailureOrdering() == cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() && CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I2)->getSynchScope(); - if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1)) + if (const auto *RMWI = dyn_cast<AtomicRMWInst>(I1)) return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() && RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() && RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() && @@ -403,8 +403,8 @@ if (!std::equal(op_begin(), op_end(), I->op_begin())) return false; - if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) { - const PHINode *otherPHI = cast<PHINode>(I); + if (const auto *thisPHI = dyn_cast<PHINode>(this)) { + const auto *otherPHI = cast<PHINode>(I); return std::equal(thisPHI->block_begin(), thisPHI->block_end(), otherPHI->block_begin()); } @@ -442,8 +442,8 @@ for (const Use &U : uses()) { // PHI nodes uses values in the corresponding predecessor block. For other // instructions, just check to see whether the parent of the use matches up. - const Instruction *I = cast<Instruction>(U.getUser()); - const PHINode *PN = dyn_cast<PHINode>(I); + const auto *I = cast<Instruction>(U.getUser()); + const auto *PN = dyn_cast<PHINode>(I); if (!PN) { if (I->getParent() != BB) return true; @@ -512,7 +512,7 @@ } bool Instruction::mayThrow() const { - if (const CallInst *CI = dyn_cast<CallInst>(this)) + if (const auto *CI = dyn_cast<CallInst>(this)) return !CI->doesNotThrow(); if (const auto *CRI = dyn_cast<CleanupReturnInst>(this)) return CRI->unwindsToCaller(); @@ -601,7 +601,7 @@ !isa<MDString>(ProfileData->getOperand(0))) return; - MDString *MDName = cast<MDString>(ProfileData->getOperand(0)); + auto *MDName = cast<MDString>(ProfileData->getOperand(0)); if (MDName->getString() != "branch_weights") return;
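The Instruction.cpp hunks keep a second idiom intact: dyn_cast<> selects the subclass once, and the already-type-checked twin instruction is re-narrowed with plain cast<>. In isolation (a simplified sketch; the real haveSameSpecialState() compares many more instruction kinds):

    #include "llvm/IR/Instructions.h"

    // True when two instructions are loads with matching volatility/alignment.
    bool sameLoadState(const llvm::Instruction *I1, const llvm::Instruction *I2) {
      if (const auto *LI = llvm::dyn_cast<llvm::LoadInst>(I1))
        return llvm::isa<llvm::LoadInst>(I2) &&
               LI->isVolatile() == llvm::cast<llvm::LoadInst>(I2)->isVolatile() &&
               LI->getAlignment() == llvm::cast<llvm::LoadInst>(I2)->getAlignment();
      return false;
    }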
Index: lib/IR/Instructions.cpp =================================================================== --- lib/IR/Instructions.cpp +++ lib/IR/Instructions.cpp @@ -66,11 +66,11 @@ if (Op1->getType()->isTokenTy()) return "select values cannot have token type"; - if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) { + if (auto *VT = dyn_cast<VectorType>(Op0->getType())) { // Vector select. if (VT->getElementType() != Type::getInt1Ty(Op0->getContext())) return "vector select condition element type must be i1"; - VectorType *ET = dyn_cast<VectorType>(Op1->getType()); + auto *ET = dyn_cast<VectorType>(Op1->getType()); if (!ET) return "selected values for vector select must be vectors"; if (ET->getNumElements() != VT->getNumElements()) @@ -411,7 +411,7 @@ /// IsConstantOne - Return true only if val is constant int 1 static bool IsConstantOne(Value *val) { assert(val && "IsConstantOne does not work with nullptr val"); - const ConstantInt *CVal = dyn_cast<ConstantInt>(val); + const auto *CVal = dyn_cast<ConstantInt>(val); return CVal && CVal->isOne(); } @@ -442,7 +442,7 @@ if (!IsConstantOne(ArraySize)) { if (IsConstantOne(AllocSize)) { AllocSize = ArraySize; // Operand * 1 = Operand - } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) { + } else if (auto *CO = dyn_cast<Constant>(ArraySize)) { Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy, false /*ZExt*/); // Malloc arg is constant product of type size and array size @@ -487,7 +487,7 @@ } } MCall->setTailCall(); - if (Function *F = dyn_cast<Function>(MallocFunc)) { + if (auto *F = dyn_cast<Function>(MallocFunc)) { MCall->setCallingConv(F->getCallingConv()); if (!F->doesNotAlias(0)) F->setDoesNotAlias(0); } @@ -573,7 +573,7 @@ Result = CallInst::Create(FreeFunc, PtrCast, Bundles, ""); } Result->setTailCall(); - if (Function *F = dyn_cast<Function>(FreeFunc)) + if (auto *F = dyn_cast<Function>(FreeFunc)) Result->setCallingConv(F->getCallingConv()); return Result; @@ -1247,7 +1247,7 @@ } bool AllocaInst::isArrayAllocation() const { - if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0))) + if (auto *CI = dyn_cast<ConstantInt>(getOperand(0))) return !CI->isOne(); return true; } @@ -1632,7 +1632,7 @@ unsigned CurIdx = 1; for (; CurIdx != IdxList.size(); ++CurIdx) { - CompositeType *CT = dyn_cast<CompositeType>(Agg); + auto *CT = dyn_cast<CompositeType>(Agg); if (!CT || CT->isPointerTy()) return nullptr; IndexTy Index = IdxList[CurIdx]; if (!CT->indexValid(Index)) return nullptr; @@ -1659,7 +1659,7 @@ /// value, just potentially different types.
bool GetElementPtrInst::hasAllZeroIndices() const { for (unsigned i = 1, e = getNumOperands(); i != e; ++i) { - if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) { + if (auto *CI = dyn_cast<ConstantInt>(getOperand(i))) { if (!CI->isZero()) return false; } else { return false; @@ -1827,7 +1827,7 @@ return false; // Mask must be vector of i32. - VectorType *MaskTy = dyn_cast<VectorType>(Mask->getType()); + auto *MaskTy = dyn_cast<VectorType>(Mask->getType()); if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32)) return false; @@ -1835,10 +1835,10 @@ if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask)) return true; - if (const ConstantVector *MV = dyn_cast<ConstantVector>(Mask)) { + if (const auto *MV = dyn_cast<ConstantVector>(Mask)) { unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); for (Value *Op : MV->operands()) { - if (ConstantInt *CI = dyn_cast<ConstantInt>(Op)) { + if (auto *CI = dyn_cast<ConstantInt>(Op)) { if (CI->uge(V1Size*2)) return false; } else if (!isa<UndefValue>(Op)) { @@ -1848,7 +1848,7 @@ return true; } - if (const ConstantDataSequential *CDS = + if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements(); for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i) @@ -1861,7 +1861,7 @@ // used as the shuffle mask. When this occurs, the shuffle mask will // fall into this case and fail. To avoid this error, do this bit of // ugliness to allow such a mask pass. - if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(Mask)) + if (const auto *CE = dyn_cast<ConstantExpr>(Mask)) if (CE->getOpcode() == Instruction::UserOp1) return true; @@ -1870,7 +1870,7 @@ int ShuffleVectorInst::getMaskValue(Constant *Mask, unsigned i) { assert(i < Mask->getType()->getVectorNumElements() && "Index out of range"); - if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(Mask)) + if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) return CDS->getElementAsInteger(i); Constant *C = Mask->getAggregateElement(i); if (isa<UndefValue>(C)) @@ -1882,7 +1882,7 @@ SmallVectorImpl<int> &Result) { unsigned NumElts = Mask->getType()->getVectorNumElements(); - if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(Mask)) { + if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) { for (unsigned i = 0; i != NumElts; ++i) Result.push_back(CDS->getElementAsInteger(i)); return; @@ -1963,10 +1963,10 @@ // insertvalue we need to check array indexing manually. // Since the only other types we can index into are struct types it's just // as easy to check those manually as well. - if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) { + if (auto *AT = dyn_cast<ArrayType>(Agg)) { if (Index >= AT->getNumElements()) return nullptr; - } else if (StructType *ST = dyn_cast<StructType>(Agg)) { + } else if (auto *ST = dyn_cast<StructType>(Agg)) { if (Index >= ST->getNumElements()) return nullptr; } else { @@ -2172,23 +2172,23 @@ // isConstantAllOnes - Helper function for several functions below static inline bool isConstantAllOnes(const Value *V) { - if (const Constant *C = dyn_cast<Constant>(V)) + if (const auto *C = dyn_cast<Constant>(V)) return C->isAllOnesValue(); return false; } bool BinaryOperator::isNeg(const Value *V) { - if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V)) + if (const auto *Bop = dyn_cast<BinaryOperator>(V)) if (Bop->getOpcode() == Instruction::Sub) - if (Constant *C = dyn_cast<Constant>(Bop->getOperand(0))) + if (auto *C = dyn_cast<Constant>(Bop->getOperand(0))) return C->isNegativeZeroValue(); return false; } bool BinaryOperator::isFNeg(const Value *V, bool IgnoreZeroSign) { - if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V)) + if (const auto *Bop = dyn_cast<BinaryOperator>(V)) if (Bop->getOpcode() == Instruction::FSub) - if (Constant *C = dyn_cast<Constant>(Bop->getOperand(0))) { + if (auto *C = dyn_cast<Constant>(Bop->getOperand(0))) { if (!IgnoreZeroSign) IgnoreZeroSign = cast<Instruction>(V)->hasNoSignedZeros(); return !IgnoreZeroSign ?
C->isNegativeZeroValue() : C->isZeroValue(); @@ -2197,7 +2197,7 @@ } bool BinaryOperator::isNot(const Value *V) { - if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V)) + if (const auto *Bop = dyn_cast<BinaryOperator>(V)) return (Bop->getOpcode() == Instruction::Xor && (isConstantAllOnes(Bop->getOperand(1)) || isConstantAllOnes(Bop->getOperand(0)))); @@ -2222,7 +2222,7 @@ Value *BinaryOperator::getNotArgument(Value *BinOp) { assert(isNot(BinOp) && "getNotArgument on non-'not' instruction!"); - BinaryOperator *BO = cast<BinaryOperator>(BinOp); + auto *BO = cast<BinaryOperator>(BinOp); Value *Op0 = BO->getOperand(0); Value *Op1 = BO->getOperand(1); if (isConstantAllOnes(Op0)) return Op1; @@ -2814,8 +2814,8 @@ if (SrcTy == DestTy) return true; - if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) - if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) + if (auto *SrcVecTy = dyn_cast<VectorType>(SrcTy)) + if (auto *DestVecTy = dyn_cast<VectorType>(DestTy)) if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { // An element by element cast. Valid if casting the elements is valid. SrcTy = SrcVecTy->getElementType(); @@ -2869,8 +2869,8 @@ if (SrcTy == DestTy) return true; - if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { - if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) { + if (auto *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { + if (auto *DestVecTy = dyn_cast<VectorType>(DestTy)) { if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { // An element by element cast. Valid if casting the elements is valid. SrcTy = SrcVecTy->getElementType(); @@ -2879,8 +2879,8 @@ } } - if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) { - if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) { + if (auto *DestPtrTy = dyn_cast<PointerType>(DestTy)) { + if (auto *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) { return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace(); } } @@ -2933,8 +2933,8 @@ return BitCast; // FIXME: Check address space sizes here - if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) - if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) + if (auto *SrcVecTy = dyn_cast<VectorType>(SrcTy)) + if (auto *DestVecTy = dyn_cast<VectorType>(DestTy)) if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) { // An element by element cast. Find the appropriate opcode based on the // element types. @@ -3075,7 +3075,7 @@ case Instruction::PtrToInt: if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) return false; - if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) + if (auto *VT = dyn_cast<VectorType>(SrcTy)) if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) return false; return SrcTy->getScalarType()->isPointerTy() && @@ -3083,14 +3083,14 @@ case Instruction::IntToPtr: if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy)) return false; - if (VectorType *VT = dyn_cast<VectorType>(SrcTy)) + if (auto *VT = dyn_cast<VectorType>(SrcTy)) if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements()) return false; return SrcTy->getScalarType()->isIntegerTy() && DstTy->getScalarType()->isPointerTy(); case Instruction::BitCast: { - PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); - PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); + auto *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); + auto *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); // BitCast implies a no-op cast of type only. No bits change. // However, you can't cast pointers to anything but pointers. @@ -3107,8 +3107,8 @@ return false; // A vector of pointers must have the same number of elements.
- if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { - if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy)) + if (auto *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { + if (auto *DstVecTy = dyn_cast<VectorType>(DstTy)) return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); return false; @@ -3117,19 +3117,19 @@ return true; } case Instruction::AddrSpaceCast: { - PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); + auto *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType()); if (!SrcPtrTy) return false; - PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); + auto *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType()); if (!DstPtrTy) return false; if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace()) return false; - if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { - if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy)) + if (auto *SrcVecTy = dyn_cast<VectorType>(SrcTy)) { + if (auto *DstVecTy = dyn_cast<VectorType>(DstTy)) return (SrcVecTy->getNumElements() == DstVecTy->getNumElements()); return false; @@ -3357,20 +3357,20 @@ } void CmpInst::swapOperands() { - if (ICmpInst *IC = dyn_cast<ICmpInst>(this)) + if (auto *IC = dyn_cast<ICmpInst>(this)) IC->swapOperands(); else cast<FCmpInst>(this)->swapOperands(); } bool CmpInst::isCommutative() const { - if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) + if (const auto *IC = dyn_cast<ICmpInst>(this)) return IC->isCommutative(); return cast<FCmpInst>(this)->isCommutative(); } bool CmpInst::isEquality() const { - if (const ICmpInst *IC = dyn_cast<ICmpInst>(this)) + if (const auto *IC = dyn_cast<ICmpInst>(this)) return IC->isEquality(); return cast<FCmpInst>(this)->isEquality(); } Index: lib/IR/Mangler.cpp =================================================================== --- lib/IR/Mangler.cpp +++ lib/IR/Mangler.cpp @@ -133,7 +133,7 @@ // Mangle functions with Microsoft calling conventions specially. Only do // this mangling for x86_64 vectorcall and 32-bit x86. - const Function *MSFunc = dyn_cast<Function>(GV); + const auto *MSFunc = dyn_cast<Function>(GV); if (Name.startswith("\01")) MSFunc = nullptr; // Don't mangle when \01 is present. CallingConv::ID CC = Index: lib/IR/Metadata.cpp =================================================================== --- lib/IR/Metadata.cpp +++ lib/IR/Metadata.cpp @@ -842,7 +842,7 @@ static MDNode *getOrSelfReference(LLVMContext &Context, ArrayRef<Metadata *> Ops) { if (!Ops.empty()) - if (MDNode *N = dyn_cast_or_null<MDNode>(Ops[0])) + if (auto *N = dyn_cast_or_null<MDNode>(Ops[0])) if (N->getNumOperands() == Ops.size() && N == N->getOperand(0)) { for (unsigned I = 1, E = Ops.size(); I != E; ++I) if (Ops[I] != N->getOperand(I)) @@ -1420,7 +1420,7 @@ // to prepend the adjustment: // !DIExpression(DW_OP_plus, Offset, [original expr]) if (Offset != 0 && MD.first == LLVMContext::MD_dbg) { - DIGlobalVariable *GV = cast<DIGlobalVariable>(MD.second); + auto *GV = cast<DIGlobalVariable>(MD.second); DIExpression *E = GV->getExpr(); ArrayRef<uint64_t> OrigElements; if (E) Index: lib/IR/Module.cpp =================================================================== --- lib/IR/Module.cpp +++ lib/IR/Module.cpp @@ -209,7 +209,7 @@ /// have an local. By default, these types are not returned. /// GlobalVariable *Module::getGlobalVariable(StringRef Name, bool AllowLocal) { - if (GlobalVariable *Result = + if (auto *Result = dyn_cast_or_null<GlobalVariable>(getNamedValue(Name))) if (AllowLocal || !Result->hasLocalLinkage()) return Result; @@ -224,7 +224,7 @@ /// existing global. Constant *Module::getOrInsertGlobal(StringRef Name, Type *Ty) { // See if we have a definition for the specified global already. - GlobalVariable *GV = dyn_cast_or_null<GlobalVariable>(getNamedValue(Name)); + auto *GV = dyn_cast_or_null<GlobalVariable>(getNamedValue(Name)); if (!GV) { // Nope, add it GlobalVariable *New = @@ -313,7 +313,7 @@ dyn_cast_or_null<MDString>(Flag->getOperand(1))) { // Check the operands of the MDNode before accessing the operands. // The verifier will actually catch these failures. - MDString *Key = cast<MDString>(Flag->getOperand(1)); + auto *Key = cast<MDString>(Flag->getOperand(1)); Metadata *Val = Flag->getOperand(2); Flags.push_back(ModuleFlagEntry(MFB, Key, Val)); } @@ -532,9 +532,9 @@ if (!GV || !GV->hasInitializer()) return GV; - const ConstantArray *Init = cast<ConstantArray>(GV->getInitializer()); + const auto *Init = cast<ConstantArray>(GV->getInitializer()); for (Value *Op : Init->operands()) { - GlobalValue *G = cast<GlobalValue>(Op->stripPointerCastsNoFollowAliases()); + auto *G = cast<GlobalValue>(Op->stripPointerCastsNoFollowAliases()); Set.insert(G); } return GV;
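Module.cpp leans on the _or_null variants, which tolerate the null that getNamedValue() may return; `auto *` keeps that single-expression lookup readable. A self-contained sketch (findGlobalVar is a hypothetical helper, not the patched API):

    #include "llvm/IR/GlobalVariable.h"
    #include "llvm/IR/Module.h"

    // Finds a non-local global variable by name, or returns null.
    llvm::GlobalVariable *findGlobalVar(llvm::Module &M, llvm::StringRef Name) {
      // dyn_cast_or_null<> accepts a null input, unlike dyn_cast<>.
      if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(
              M.getNamedValue(Name)))
        if (!GV->hasLocalLinkage())
          return GV;
      return nullptr;
    }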
Index: lib/IR/Operator.cpp =================================================================== --- lib/IR/Operator.cpp +++ lib/IR/Operator.cpp @@ -26,7 +26,7 @@ for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this); GTI != GTE; ++GTI) { - ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand()); + auto *OpC = dyn_cast<ConstantInt>(GTI.getOperand()); if (!OpC) return false; if (OpC->isZero()) Index: lib/IR/ProfileSummary.cpp =================================================================== --- lib/IR/ProfileSummary.cpp +++ lib/IR/ProfileSummary.cpp @@ -90,8 +90,8 @@ return false; if (MD->getNumOperands() != 2) return false; - MDString *KeyMD = dyn_cast<MDString>(MD->getOperand(0)); - ConstantAsMetadata *ValMD = dyn_cast<ConstantAsMetadata>(MD->getOperand(1)); + auto *KeyMD = dyn_cast<MDString>(MD->getOperand(0)); + auto *ValMD = dyn_cast<ConstantAsMetadata>(MD->getOperand(1)); if (!KeyMD || !ValMD) return false; if (!KeyMD->getString().equals(Key)) @@ -104,8 +104,8 @@ static bool isKeyValuePair(MDTuple *MD, const char *Key, const char *Val) { if (!MD || MD->getNumOperands() != 2) return false; - MDString *KeyMD = dyn_cast<MDString>(MD->getOperand(0)); - MDString *ValMD = dyn_cast<MDString>(MD->getOperand(1)); + auto *KeyMD = dyn_cast<MDString>(MD->getOperand(0)); + auto *ValMD = dyn_cast<MDString>(MD->getOperand(1)); if (!KeyMD || !ValMD) return false; if (!KeyMD->getString().equals(Key) || !ValMD->getString().equals(Val)) @@ -117,21 +117,21 @@ static bool getSummaryFromMD(MDTuple *MD, SummaryEntryVector &Summary) { if (!MD || MD->getNumOperands() != 2) return false; - MDString *KeyMD = dyn_cast<MDString>(MD->getOperand(0)); + auto *KeyMD = dyn_cast<MDString>(MD->getOperand(0)); if (!KeyMD || !KeyMD->getString().equals("DetailedSummary")) return false; - MDTuple *EntriesMD = dyn_cast<MDTuple>(MD->getOperand(1)); + auto *EntriesMD = dyn_cast<MDTuple>(MD->getOperand(1)); if (!EntriesMD) return false; for (auto &&MDOp : EntriesMD->operands()) { - MDTuple *EntryMD = dyn_cast<MDTuple>(MDOp); + auto *EntryMD = dyn_cast<MDTuple>(MDOp); if (!EntryMD || EntryMD->getNumOperands() != 3) return false; - ConstantAsMetadata *Op0 = + auto *Op0 = dyn_cast<ConstantAsMetadata>(EntryMD->getOperand(0)); - ConstantAsMetadata *Op1 = + auto *Op1 = dyn_cast<ConstantAsMetadata>(EntryMD->getOperand(1)); - ConstantAsMetadata *Op2 = + auto *Op2 = dyn_cast<ConstantAsMetadata>(EntryMD->getOperand(2)); if (!Op0 || !Op1 || !Op2) @@ -148,7 +148,7 @@ return nullptr; if (!isa<MDTuple>(MD)) return nullptr; - MDTuple *Tuple = cast<MDTuple>(MD); + auto *Tuple = cast<MDTuple>(MD); if (Tuple->getNumOperands() != 8) return nullptr; Index: lib/IR/Type.cpp =================================================================== --- lib/IR/Type.cpp +++ lib/IR/Type.cpp @@ -577,10 +577,10 @@ // vector case all of the indices must be equal. if (!V->getType()->getScalarType()->isIntegerTy(32)) return false; - const Constant *C = dyn_cast<Constant>(V); + const auto *C = dyn_cast<Constant>(V); if (C && V->getType()->isVectorTy()) C = C->getSplatValue(); - const ConstantInt *CU = dyn_cast_or_null<ConstantInt>(C); + const auto *CU = dyn_cast_or_null<ConstantInt>(C); return CU && CU->getZExtValue() < STy->getNumElements(); }
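All of the ProfileSummary hunks decode (key, value) metadata pairs, so the same two-dyn_cast shape repeats; with `auto *` each pair reads as one line per operand. The guard in isolation (isNamedPair is illustrative, not the file's static helper):

    #include "llvm/IR/Metadata.h"

    // True when MD is a two-operand tuple of MDStrings whose key matches.
    bool isNamedPair(const llvm::MDTuple *MD, llvm::StringRef Key) {
      if (!MD || MD->getNumOperands() != 2)
        return false;
      auto *KeyMD = llvm::dyn_cast<llvm::MDString>(MD->getOperand(0));
      auto *ValMD = llvm::dyn_cast<llvm::MDString>(MD->getOperand(1));
      return KeyMD && ValMD && KeyMD->getString().equals(Key);
    }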
Index: lib/IR/TypeFinder.cpp =================================================================== --- lib/IR/TypeFinder.cpp +++ lib/IR/TypeFinder.cpp @@ -100,7 +100,7 @@ Ty = TypeWorklist.pop_back_val(); // If this is a structure or opaque type, add a name for the type. - if (StructType *STy = dyn_cast<StructType>(Ty)) + if (auto *STy = dyn_cast<StructType>(Ty)) if (!OnlyNamed || STy->hasName()) StructTypes.push_back(STy); @@ -140,7 +140,7 @@ return; // Look in operands for types. - const User *U = cast<User>(V); + const auto *U = cast<User>(V); for (Constant::const_op_iterator I = U->op_begin(), E = U->op_end(); I != E;++I) incorporateValue(*I); Index: lib/IR/Value.cpp =================================================================== --- lib/IR/Value.cpp +++ lib/IR/Value.cpp @@ -140,17 +140,17 @@ static bool getSymTab(Value *V, ValueSymbolTable *&ST) { ST = nullptr; - if (Instruction *I = dyn_cast<Instruction>(V)) { + if (auto *I = dyn_cast<Instruction>(V)) { if (BasicBlock *P = I->getParent()) if (Function *PP = P->getParent()) ST = PP->getValueSymbolTable(); - } else if (BasicBlock *BB = dyn_cast<BasicBlock>(V)) { + } else if (auto *BB = dyn_cast<BasicBlock>(V)) { if (Function *P = BB->getParent()) ST = P->getValueSymbolTable(); - } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) { + } else if (auto *GV = dyn_cast<GlobalValue>(V)) { if (Module *P = GV->getParent()) ST = &P->getValueSymbolTable(); - } else if (Argument *A = dyn_cast<Argument>(V)) { + } else if (auto *A = dyn_cast<Argument>(V)) { if (Function *P = A->getParent()) ST = P->getValueSymbolTable(); } else { @@ -256,7 +256,7 @@ void Value::setName(const Twine &NewName) { setNameImpl(NewName); - if (Function *F = dyn_cast<Function>(this)) + if (auto *F = dyn_cast<Function>(this)) F->recalculateIntrinsicID(); } @@ -394,7 +394,7 @@ U.set(New); } - if (BasicBlock *BB = dyn_cast<BasicBlock>(this)) + if (auto *BB = dyn_cast<BasicBlock>(this)) BB->replaceSuccessorsPhiUsesWith(cast<BasicBlock>(New)); } @@ -447,7 +447,7 @@ Visited.insert(V); do { - if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { + if (auto *GEP = dyn_cast<GEPOperator>(V)) { switch (StripKind) { case PSK_ZeroIndicesAndAliases: case PSK_ZeroIndices: @@ -467,7 +467,7 @@ } else if (Operator::getOpcode(V) == Instruction::BitCast || Operator::getOpcode(V) == Instruction::AddrSpaceCast) { V = cast<Operator>(V)->getOperand(0); - } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { + } else if (auto *GA = dyn_cast<GlobalAlias>(V)) { if (StripKind == PSK_ZeroIndices || GA->isInterposable()) return V; V = GA->getAliasee(); @@ -514,7 +514,7 @@ Visited.insert(this); Value *V = this; do { - if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { + if (auto *GEP = dyn_cast<GEPOperator>(V)) { if (!GEP->isInBounds()) return V; APInt GEPOffset(Offset); @@ -524,7 +524,7 @@ V = GEP->getPointerOperand(); } else if (Operator::getOpcode(V) == Instruction::BitCast) { V = cast<Operator>(V)->getOperand(0); - } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { + } else if (auto *GA = dyn_cast<GlobalAlias>(V)) { V = GA->getAliasee(); } else { if (auto CS = CallSite(V)) @@ -551,7 +551,7 @@ unsigned DerefBytes = 0; CanBeNull = false; - if (const Argument *A = dyn_cast<Argument>(this)) { + if (const auto *A = dyn_cast<Argument>(this)) { DerefBytes = A->getDereferenceableBytes(); if (DerefBytes == 0 && A->hasByValAttr() && A->getType()->isSized()) { DerefBytes = DL.getTypeStoreSize(A->getType()); @@ -567,7 +567,7 @@ DerefBytes = CS.getDereferenceableOrNullBytes(0); CanBeNull =
true; } - } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) { + } else if (const auto *LI = dyn_cast<LoadInst>(this)) { if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) { ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0)); DerefBytes = CI->getLimitedValue(); @@ -616,7 +616,7 @@ } } } - } else if (const Argument *A = dyn_cast<Argument>(this)) { + } else if (const auto *A = dyn_cast<Argument>(this)) { Align = A->getParamAlignment(); if (!Align && A->hasStructRetAttr()) { @@ -625,7 +625,7 @@ if (EltTy->isSized()) Align = DL.getABITypeAlignment(EltTy); } - } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(this)) { + } else if (const auto *AI = dyn_cast<AllocaInst>(this)) { Align = AI->getAlignment(); if (Align == 0) { Type *AllocatedType = AI->getAllocatedType(); @@ -634,7 +634,7 @@ } } else if (auto CS = ImmutableCallSite(this)) Align = CS.getAttributes().getParamAlignment(AttributeSet::ReturnIndex); - else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) + else if (const auto *LI = dyn_cast<LoadInst>(this)) if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) { ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0)); Align = CI->getLimitedValue(); @@ -645,7 +645,7 @@ Value *Value::DoPHITranslation(const BasicBlock *CurBB, const BasicBlock *PredBB) { - PHINode *PN = dyn_cast<PHINode>(this); + auto *PN = dyn_cast<PHINode>(this); if (PN && PN->getParent() == CurBB) return PN->getIncomingValueForBlock(PredBB); return this; Index: lib/IR/ValueTypes.cpp =================================================================== --- lib/IR/ValueTypes.cpp +++ lib/IR/ValueTypes.cpp @@ -109,9 +109,9 @@ unsigned EVT::getExtendedSizeInBits() const { assert(isExtended() && "Type is not extended!"); - if (IntegerType *ITy = dyn_cast<IntegerType>(LLVMTy)) + if (auto *ITy = dyn_cast<IntegerType>(LLVMTy)) return ITy->getBitWidth(); - if (VectorType *VTy = dyn_cast<VectorType>(LLVMTy)) + if (auto *VTy = dyn_cast<VectorType>(LLVMTy)) return VTy->getBitWidth(); llvm_unreachable("Unrecognized extended type!"); } @@ -296,7 +296,7 @@ case Type::PPC_FP128TyID: return MVT(MVT::ppcf128); case Type::PointerTyID: return MVT(MVT::iPTR); case Type::VectorTyID: { - VectorType *VTy = cast<VectorType>(Ty); + auto *VTy = cast<VectorType>(Ty); return getVectorVT( getVT(VTy->getElementType(), false), VTy->getNumElements()); } @@ -313,7 +313,7 @@ case Type::IntegerTyID: return getIntegerVT(Ty->getContext(), cast<IntegerType>(Ty)->getBitWidth()); case Type::VectorTyID: { - VectorType *VTy = cast<VectorType>(Ty); + auto *VTy = cast<VectorType>(Ty); return getVectorVT(Ty->getContext(), getEVT(VTy->getElementType(), false), VTy->getNumElements()); } Index: lib/IR/Verifier.cpp =================================================================== --- lib/IR/Verifier.cpp +++ lib/IR/Verifier.cpp @@ -533,7 +533,7 @@ "Only global variables can have appending linkage!", &GV); if (GV.hasAppendingLinkage()) { - const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV); + const auto *GVar = dyn_cast<GlobalVariable>(&GV); Assert(GVar && GVar->getValueType()->isArrayTy(), "Only global arrays can have appending linkage!", GVar); } @@ -542,7 +542,7 @@ Assert(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV); forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool { - if (const Instruction *I = dyn_cast<Instruction>(V)) { + if (const auto *I = dyn_cast<Instruction>(V)) { if (!I->getParent() || !I->getParent()->getParent()) CheckFailed("Global is referenced by parentless instruction!", &GV, &M, I); I->getParent()->getParent(), I->getParent()->getParent()->getParent()); return false; - } else if (const Function *F = dyn_cast<Function>(V)) { + } else if (const auto *F = dyn_cast<Function>(V)) { if (F->getParent() != &M) CheckFailed("Global is used by function in a
different module", &GV, &M, F, F->getParent()); @@ -585,8 +585,8 @@ "invalid linkage for intrinsic global variable", &GV); // Don't worry about emitting an error for it not being an array, // visitGlobalValue will complain on appending non-array. - if (ArrayType *ATy = dyn_cast(GV.getValueType())) { - StructType *STy = dyn_cast(ATy->getElementType()); + if (auto *ATy = dyn_cast(GV.getValueType())) { + auto *STy = dyn_cast(ATy->getElementType()); PointerType *FuncPtrTy = FunctionType::get(Type::getVoidTy(Context), false)->getPointerTo(); // FIXME: Reject the 2-field form in LLVM 4.0. @@ -609,12 +609,12 @@ Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(), "invalid linkage for intrinsic global variable", &GV); Type *GVType = GV.getValueType(); - if (ArrayType *ATy = dyn_cast(GVType)) { - PointerType *PTy = dyn_cast(ATy->getElementType()); + if (auto *ATy = dyn_cast(GVType)) { + auto *PTy = dyn_cast(ATy->getElementType()); Assert(PTy, "wrong type for intrinsic global variable", &GV); if (GV.hasInitializer()) { const Constant *Init = GV.getInitializer(); - const ConstantArray *InitArray = dyn_cast(Init); + const auto *InitArray = dyn_cast(Init); Assert(InitArray, "wrong initalizer for intrinsic global variable", Init); for (Value *Op : InitArray->operands()) { @@ -768,12 +768,12 @@ // If this was an instruction, bb, or argument, verify that it is in the // function that we expect. Function *ActualF = nullptr; - if (Instruction *I = dyn_cast(L->getValue())) { + if (auto *I = dyn_cast(L->getValue())) { Assert(I->getParent(), "function-local metadata not in basic block", L, I); ActualF = I->getParent()->getParent(); - } else if (BasicBlock *BB = dyn_cast(L->getValue())) + } else if (auto *BB = dyn_cast(L->getValue())) ActualF = BB->getParent(); - else if (Argument *A = dyn_cast(L->getValue())) + else if (auto *A = dyn_cast(L->getValue())) ActualF = A->getParent(); assert(ActualF && "Unimplemented function local metadata case!"); @@ -1195,7 +1195,7 @@ // Validate that the requirements in the module are valid. for (const MDNode *Requirement : Requirements) { - const MDString *Flag = cast(Requirement->getOperand(0)); + const auto *Flag = cast(Requirement->getOperand(0)); const Metadata *ReqValue = Requirement->getOperand(1); const MDNode *Op = SeenIDs.lookup(Flag); @@ -1232,7 +1232,7 @@ "invalid behavior operand in module flag (unexpected constant)", Op->getOperand(0)); } - MDString *ID = dyn_cast_or_null(Op->getOperand(1)); + auto *ID = dyn_cast_or_null(Op->getOperand(1)); Assert(ID, "invalid ID operand in module flag (expected metadata string)", Op->getOperand(1)); @@ -1247,7 +1247,7 @@ case Module::Require: { // The value should itself be an MDNode with two operands, a flag ID (an // MDString), and a value. 
- MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2)); + auto *Value = dyn_cast<MDNode>(Op->getOperand(2)); Assert(Value && Value->getNumOperands() == 2, "invalid value for 'require' module flag (expected metadata pair)", Op->getOperand(2)); @@ -1435,7 +1435,7 @@ .getAsString(Idx), V); - if (PointerType *PTy = dyn_cast<PointerType>(Ty)) { + if (auto *PTy = dyn_cast<PointerType>(Ty)) { SmallPtrSet<Type *, 4> Visited; if (!PTy->getElementType()->isSized(&Visited)) { Assert(!Attrs.hasAttribute(Idx, Attribute::ByVal) && @@ -1580,7 +1580,7 @@ if (Attrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::JumpTable)) { - const GlobalValue *GV = cast<GlobalValue>(V); + const auto *GV = cast<GlobalValue>(V); Assert(GV->hasGlobalUnnamedAddr(), "Attribute 'jumptable' requires 'unnamed_addr'", V); } @@ -1626,7 +1626,7 @@ MD); Assert(isa<MDString>(MD->getOperand(0)), "expected string with name of the !prof annotation", MD); - MDString *MDS = cast<MDString>(MD->getOperand(0)); + auto *MDS = cast<MDString>(MD->getOperand(0)); StringRef ProfName = MDS->getString(); Assert(ProfName.equals("function_entry_count"), "first operand should be 'function_entry_count'", MD); @@ -1741,7 +1741,7 @@ auto *PT = dyn_cast<PointerType>(Target->getType()); Assert(PT && PT->getElementType()->isFunctionTy(), "gc.statepoint callee must be of function pointer type", &CI, Target); - FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType()); + auto *TargetFuncType = cast<FunctionType>(PT->getElementType()); const Value *NumCallArgsV = CS.getArgument(3); Assert(isa<ConstantInt>(NumCallArgsV), @@ -1817,7 +1817,7 @@ // gc.relocate calls which are tied to this statepoint and thus part // of the same statepoint sequence for (const User *U : CI.users()) { - const CallInst *Call = dyn_cast<CallInst>(U); + const auto *Call = dyn_cast<CallInst>(U); Assert(Call, "illegal use of statepoint token", &CI, U); if (!Call) continue; Assert(isa<GCRelocateInst>(Call) || isa<GCResultInst>(Call), @@ -2097,7 +2097,7 @@ for (auto &I : BB) { // Be careful about using DILocation here since we might be dealing with // broken code (this is the Verifier after all). - DILocation *DL = + auto *DL = dyn_cast_or_null<DILocation>(I.getDebugLoc().getAsMDNode()); if (!DL) continue; @@ -2458,8 +2458,8 @@ &I); if (SrcTy->isVectorTy()) { - VectorType *VSrc = dyn_cast<VectorType>(SrcTy); - VectorType *VDest = dyn_cast<VectorType>(DestTy); + auto *VSrc = dyn_cast<VectorType>(SrcTy); + auto *VDest = dyn_cast<VectorType>(DestTy); Assert(VSrc->getNumElements() == VDest->getNumElements(), "PtrToInt Vector width mismatch", &I); } @@ -2484,8 +2484,8 @@ Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch", &I); if (SrcTy->isVectorTy()) { - VectorType *VSrc = dyn_cast<VectorType>(SrcTy); - VectorType *VDest = dyn_cast<VectorType>(DestTy); + auto *VSrc = dyn_cast<VectorType>(SrcTy); + auto *VDest = dyn_cast<VectorType>(DestTy); Assert(VSrc->getNumElements() == VDest->getNumElements(), "IntToPtr Vector width mismatch", &I); } @@ -2546,7 +2546,7 @@ Assert(CS.getCalledValue()->getType()->isPointerTy(), "Called function must be a pointer!", I); - PointerType *FPTy = cast<PointerType>(CS.getCalledValue()->getType()); + auto *FPTy = cast<PointerType>(CS.getCalledValue()->getType()); Assert(FPTy->getElementType()->isFunctionTy(), "Called function is not pointer to function type!", I); @@ -2707,8 +2707,8 @@ static bool isTypeCongruent(Type *L, Type *R) { if (L == R) return true; - PointerType *PL = dyn_cast<PointerType>(L); - PointerType *PR = dyn_cast<PointerType>(R); + auto *PL = dyn_cast<PointerType>(L); + auto *PR = dyn_cast<PointerType>(R); if (!PL || !PR) return false; return PL->getAddressSpace() == PR->getAddressSpace(); @@ -2775,7 +2775,7 @@ Instruction *Next = CI.getNextNode(); // Handle the optional bitcast.
- if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) { + if (auto *BI = dyn_cast_or_null<BitCastInst>(Next)) { Assert(BI->getOperand(0) == RetVal, "bitcast following musttail call must use the call", BI); RetVal = BI; @@ -2783,7 +2783,7 @@ } // Check the return. - ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next); + auto *Ret = dyn_cast_or_null<ReturnInst>(Next); Assert(Ret, "musttail call must be precede a ret with an optional bitcast", &CI); Assert(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal, @@ -3024,7 +3024,7 @@ } void Verifier::visitLoadInst(LoadInst &LI) { - PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType()); + auto *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType()); Assert(PTy, "Load operand must be a pointer.", &LI); Type *ElTy = LI.getType(); Assert(LI.getAlignment() <= Value::MaximumAlignment, @@ -3051,7 +3051,7 @@ } void Verifier::visitStoreInst(StoreInst &SI) { - PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType()); + auto *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType()); Assert(PTy, "Store operand must be a pointer.", &SI); Type *ElTy = PTy->getElementType(); Assert(ElTy == SI.getOperand(0)->getType(), @@ -3153,7 +3153,7 @@ CXI.getFailureOrdering() != AtomicOrdering::AcquireRelease, "cmpxchg failure ordering cannot include release semantics", &CXI); - PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType()); + auto *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType()); Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI); Type *ElTy = PTy->getElementType(); Assert(ElTy->isIntegerTy() || ElTy->isPointerTy(), @@ -3173,7 +3173,7 @@ "atomicrmw instructions must be atomic.", &RMWI); Assert(RMWI.getOrdering() != AtomicOrdering::Unordered, "atomicrmw instructions cannot be unordered.", &RMWI); - PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType()); + auto *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType()); Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI); Type *ElTy = PTy->getElementType(); Assert(ElTy->isIntegerTy(), "atomicrmw operand must have integer type!", @@ -3620,11 +3620,11 @@ } void Verifier::verifyDominatesUse(Instruction &I, unsigned i) { - Instruction *Op = cast<Instruction>(I.getOperand(i)); + auto *Op = cast<Instruction>(I.getOperand(i)); // If the we have an invalid invoke, don't try to compute the dominance. // We already reject it in the invoke specific checks and the dominance // computation doesn't handle multiple edges. - if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) { + if (auto *II = dyn_cast<InvokeInst>(Op)) { if (II->getNormalDest() == II->getUnwindDest()) return; } @@ -3697,7 +3697,7 @@ // themselves, actually have parent basic blocks. If the use is not an // instruction, it is an error! for (Use &U : I.uses()) { - if (Instruction *Used = dyn_cast<Instruction>(U.getUser())) + if (auto *Used = dyn_cast<Instruction>(U.getUser())) Assert(Used->getParent() != nullptr, "Instruction referencing" " instruction not embedded in a basic block!", @@ -3717,7 +3717,7 @@ Assert(false, "Instruction operands must be first-class values!", &I); } - if (Function *F = dyn_cast<Function>(I.getOperand(i))) { + if (auto *F = dyn_cast<Function>(I.getOperand(i))) { // Check to make sure that the "address of" an intrinsic function is never // taken.
Assert( @@ -3737,13 +3737,13 @@ &I); Assert(F->getParent() == &M, "Referencing function in another module!", &I, &M, F, F->getParent()); - } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) { + } else if (auto *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) { Assert(OpBB->getParent() == BB->getParent(), "Referring to a basic block in another function!", &I); - } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) { + } else if (auto *OpArg = dyn_cast<Argument>(I.getOperand(i))) { Assert(OpArg->getParent() == BB->getParent(), "Referring to an argument in another function!", &I); - } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) { + } else if (auto *GV = dyn_cast<GlobalValue>(I.getOperand(i))) { Assert(GV->getParent() == &M, "Referencing global in another module!", &I, &M, GV, GV->getParent()); } else if (isa<InlineAsm>(I.getOperand(i))) { @@ -3752,7 +3752,7 @@ Assert((i + 1 == e && isa<CallInst>(I)) || (i + 3 == e && isa<InvokeInst>(I)), "Cannot take the address of an inline asm!", &I); - } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) { + } else if (auto *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) { if (CE->getType()->isPtrOrPtrVectorTy() || !DL.getNonIntegralAddressSpaces().empty()) { // If we have a ConstantExpr pointer, we need to see if it came from an @@ -3918,7 +3918,7 @@ case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset: { - ConstantInt *AlignCI = dyn_cast<ConstantInt>(CS.getArgOperand(3)); + auto *AlignCI = dyn_cast<ConstantInt>(CS.getArgOperand(3)); Assert(AlignCI, "alignment argument of memory intrinsics must be a constant int", CS); @@ -3934,7 +3934,7 @@ case Intrinsic::gcwrite: case Intrinsic::gcread: if (ID == Intrinsic::gcroot) { - AllocaInst *AI = + auto *AI = dyn_cast<AllocaInst>(CS.getArgOperand(0)->stripPointerCasts()); Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", CS); Assert(isa<Constant>(CS.getArgOperand(1)), @@ -3997,7 +3997,7 @@ } case Intrinsic::localrecover: { Value *FnArg = CS.getArgOperand(0)->stripPointerCasts(); - Function *Fn = dyn_cast<Function>(FnArg); + auto *Fn = dyn_cast<Function>(FnArg); Assert(Fn && !Fn->isDeclaration(), "llvm.localrecover first " "argument must be function defined in this module", @@ -4049,7 +4049,7 @@ // Check that this relocate is correctly tied to the statepoint // This is case for relocate on the unwinding path of an invoke statepoint - if (LandingPadInst *LandingPad = + if (auto *LandingPad = dyn_cast<LandingPadInst>(CS.getArgOperand(0))) { const BasicBlock *InvokeBB = @@ -4133,7 +4133,7 @@ // but gc_relocate does not need to return the same pointer type as the // relocated pointer. It can be casted to the correct type later if it's // desired. However, they must have the same address space and 'vectorness' - GCRelocateInst &Relocate = cast<GCRelocateInst>(*CS.getInstruction()); + auto &Relocate = cast<GCRelocateInst>(*CS.getInstruction()); Assert(Relocate.getDerivedPtr()->getType()->getScalarType()->isPointerTy(), "gc.relocate: relocated value must be a gc pointer", CS);
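The Verifier hunks are the largest block, but every one is the same dispatch-on-kind chain over Values. Reduced to its skeleton (a sketch with simplified checks, not the Verifier's real diagnostics):

    #include "llvm/IR/Instructions.h"

    // True when an operand of I lives in the same function as I itself.
    bool operandInSameFunction(const llvm::Instruction &I, const llvm::Value *Op) {
      const llvm::Function *F = I.getParent()->getParent();
      if (const auto *OpI = llvm::dyn_cast<llvm::Instruction>(Op))
        return OpI->getParent() && OpI->getParent()->getParent() == F;
      if (const auto *OpBB = llvm::dyn_cast<llvm::BasicBlock>(Op))
        return OpBB->getParent() == F;
      if (const auto *OpArg = llvm::dyn_cast<llvm::Argument>(Op))
        return OpArg->getParent() == F;
      return true; // constants, globals, metadata: validated elsewhere
    }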
Index: lib/LTO/LTOModule.cpp =================================================================== --- lib/LTO/LTOModule.cpp +++ lib/LTO/LTOModule.cpp @@ -253,11 +253,11 @@ /// objcClassNameFromExpression - Get string that the data pointer points to. bool LTOModule::objcClassNameFromExpression(const Constant *c, std::string &name) { - if (const ConstantExpr *ce = dyn_cast<ConstantExpr>(c)) { + if (const auto *ce = dyn_cast<ConstantExpr>(c)) { Constant *op = ce->getOperand(0); - if (GlobalVariable *gvn = dyn_cast<GlobalVariable>(op)) { + if (auto *gvn = dyn_cast<GlobalVariable>(op)) { Constant *cn = gvn->getInitializer(); - if (ConstantDataArray *ca = dyn_cast<ConstantDataArray>(cn)) { + if (auto *ca = dyn_cast<ConstantDataArray>(cn)) { if (ca->isCString()) { name = (".objc_class_name_" + ca->getAsCString()).str(); return true; @@ -270,7 +270,7 @@ /// addObjCClass - Parse i386/ppc ObjC class data structure. void LTOModule::addObjCClass(const GlobalVariable *clgv) { - const ConstantStruct *c = dyn_cast<ConstantStruct>(clgv->getInitializer()); + const auto *c = dyn_cast<ConstantStruct>(clgv->getInitializer()); if (!c) return; // second slot in __OBJC,__class is pointer to superclass name @@ -304,7 +304,7 @@ /// addObjCCategory - Parse i386/ppc ObjC category data structure. void LTOModule::addObjCCategory(const GlobalVariable *clgv) { - const ConstantStruct *c = dyn_cast<ConstantStruct>(clgv->getInitializer()); + const auto *c = dyn_cast<ConstantStruct>(clgv->getInitializer()); if (!c) return; // second slot in __OBJC,__category is pointer to target class name @@ -386,21 +386,21 @@ // special case if this data blob is an ObjC class definition std::string Section = v->getSection(); if (Section.compare(0, 15, "__OBJC,__class,") == 0) { - if (const GlobalVariable *gv = dyn_cast<GlobalVariable>(v)) { + if (const auto *gv = dyn_cast<GlobalVariable>(v)) { addObjCClass(gv); } } // special case if this data blob is an ObjC category definition else if (Section.compare(0, 18, "__OBJC,__category,") == 0) { - if (const GlobalVariable *gv = dyn_cast<GlobalVariable>(v)) { + if (const auto *gv = dyn_cast<GlobalVariable>(v)) { addObjCCategory(gv); } } // special case if this data blob is the list of referenced classes else if (Section.compare(0, 18, "__OBJC,__cls_refs,") == 0) { - if (const GlobalVariable *gv = dyn_cast<GlobalVariable>(v)) { + if (const auto *gv = dyn_cast<GlobalVariable>(v)) { addObjCClassRef(gv); } } @@ -414,7 +414,7 @@ Buffer.c_str(); } - const Function *F = + const auto *F = cast<Function>(IRFile->getSymbolGV(Sym.getRawDataRefImpl())); addDefinedFunctionSymbol(Buffer, F); } @@ -434,7 +434,7 @@ if (isFunction) { attr |= LTO_SYMBOL_PERMISSIONS_CODE; } else { - const GlobalVariable *gv = dyn_cast<GlobalVariable>(def); + const auto *gv = dyn_cast<GlobalVariable>(def); if (gv && gv->isConstant()) attr |= LTO_SYMBOL_PERMISSIONS_RODATA; else @@ -640,11 +640,11 @@ // Linker Options if (Metadata *Val = getModule().getModuleFlag("Linker Options")) { - MDNode *LinkerOptions = cast<MDNode>(Val); + auto *LinkerOptions = cast<MDNode>(Val); for (unsigned i = 0, e = LinkerOptions->getNumOperands(); i != e; ++i) { - MDNode *MDOptions = cast<MDNode>(LinkerOptions->getOperand(i)); + auto *MDOptions = cast<MDNode>(LinkerOptions->getOperand(i)); for (unsigned ii = 0, ie = MDOptions->getNumOperands(); ii != ie; ++ii) { - MDString *MDOption = cast<MDString>(MDOptions->getOperand(ii)); + auto *MDOption = cast<MDString>(MDOptions->getOperand(ii)); OS << " " << MDOption->getString(); } } Index: lib/Linker/IRMover.cpp =================================================================== --- lib/Linker/IRMover.cpp +++ lib/Linker/IRMover.cpp @@ -126,7 +126,7 @@ // Okay, we have two types with identical kinds that we haven't seen before. // If this is an opaque struct type, special case it. - if (StructType *SSTy = dyn_cast<StructType>(SrcTy)) { + if (auto *SSTy = dyn_cast<StructType>(SrcTy)) { // Mapping an opaque type to any struct, just keep the dest struct. if (SSTy->isOpaque()) { Entry = DstTy; @@ -157,15 +157,15 @@ // Fail if any of the extra properties (e.g. array size) of the type disagree.
if (isa<IntegerType>(DstTy)) return false; // bitwidth disagrees. - if (PointerType *PT = dyn_cast<PointerType>(DstTy)) { + if (auto *PT = dyn_cast<PointerType>(DstTy)) { if (PT->getAddressSpace() != cast<PointerType>(SrcTy)->getAddressSpace()) return false; - } else if (FunctionType *FT = dyn_cast<FunctionType>(DstTy)) { + } else if (auto *FT = dyn_cast<FunctionType>(DstTy)) { if (FT->isVarArg() != cast<FunctionType>(SrcTy)->isVarArg()) return false; - } else if (StructType *DSTy = dyn_cast<StructType>(DstTy)) { - StructType *SSTy = cast<StructType>(SrcTy); + } else if (auto *DSTy = dyn_cast<StructType>(DstTy)) { + auto *SSTy = cast<StructType>(SrcTy); if (DSTy->isLiteral() != SSTy->isLiteral() || DSTy->isPacked() != SSTy->isPacked()) return false; @@ -192,7 +192,7 @@ void TypeMapTy::linkDefinedTypeBodies() { SmallVector<Type *, 16> Elements; for (StructType *SrcSTy : SrcDefinitionsToResolve) { - StructType *DstSTy = cast<StructType>(MappedTypes[SrcSTy]); + auto *DstSTy = cast<StructType>(MappedTypes[SrcSTy]); assert(DstSTy->isOpaque()); // Map the body of the source type over to a new body for the dest type. @@ -548,7 +548,7 @@ if (!*NewProto) return nullptr; - GlobalValue *New = dyn_cast<GlobalValue>(*NewProto); + auto *New = dyn_cast<GlobalValue>(*NewProto); if (!New) return *NewProto; @@ -677,8 +677,8 @@ } // Unify the element type of appending arrays. - ArrayType *DAT = cast<ArrayType>(DGV->getValueType()); - ArrayType *SAT = cast<ArrayType>(SGV.getValueType()); + auto *DAT = cast<ArrayType>(DGV->getValueType()); + auto *SAT = cast<ArrayType>(SGV.getValueType()); TypeMap.addTypeMapping(DAT->getElementType(), SAT->getElementType()); } @@ -783,7 +783,7 @@ uint64_t DstNumElements = 0; if (DstGV) { - ArrayType *DstTy = cast<ArrayType>(DstGV->getValueType()); + auto *DstTy = cast<ArrayType>(DstGV->getValueType()); DstNumElements = DstTy->getNumElements(); if (!SrcGV->hasAppendingLinkage() || !DstGV->hasAppendingLinkage()) @@ -924,7 +924,7 @@ // Overloaded intrinsics have overloaded types names as part of their // names. If we renamed overloaded types we should rename the intrinsic // as well. - if (Function *F = dyn_cast<Function>(NewGV)) + if (auto *F = dyn_cast<Function>(NewGV)) if (auto Remangled = Intrinsic::remangleIntrinsicFunction(F)) NewGV = Remangled.getValue(); @@ -1042,7 +1042,7 @@ for (unsigned I = 0, E = DstModFlags->getNumOperands(); I != E; ++I) { MDNode *Op = DstModFlags->getOperand(I); ConstantInt *Behavior = mdconst::extract<ConstantInt>(Op->getOperand(0)); - MDString *ID = cast<MDString>(Op->getOperand(1)); + auto *ID = cast<MDString>(Op->getOperand(1)); if (Behavior->getZExtValue() == Module::Require) { Requirements.insert(cast<MDNode>(Op->getOperand(2))); @@ -1057,7 +1057,7 @@ MDNode *SrcOp = SrcModFlags->getOperand(I); ConstantInt *SrcBehavior = mdconst::extract<ConstantInt>(SrcOp->getOperand(0)); - MDString *ID = cast<MDString>(SrcOp->getOperand(1)); + auto *ID = cast<MDString>(SrcOp->getOperand(1)); MDNode *DstOp; unsigned DstIndex; std::tie(DstOp, DstIndex) = Flags.lookup(ID); @@ -1133,8 +1133,8 @@ continue; } case Module::Append: { - MDNode *DstValue = cast<MDNode>(DstOp->getOperand(2)); - MDNode *SrcValue = cast<MDNode>(SrcOp->getOperand(2)); + auto *DstValue = cast<MDNode>(DstOp->getOperand(2)); + auto *SrcValue = cast<MDNode>(SrcOp->getOperand(2)); SmallVector<Metadata *, 8> MDs; MDs.reserve(DstValue->getNumOperands() + SrcValue->getNumOperands()); MDs.append(DstValue->op_begin(), DstValue->op_end()); @@ -1145,8 +1145,8 @@ } case Module::AppendUnique: { SmallSetVector<Metadata *, 16> Elts; - MDNode *DstValue = cast<MDNode>(DstOp->getOperand(2)); - MDNode *SrcValue = cast<MDNode>(SrcOp->getOperand(2)); + auto *DstValue = cast<MDNode>(DstOp->getOperand(2)); + auto *SrcValue = cast<MDNode>(SrcOp->getOperand(2)); Elts.insert(DstValue->op_begin(), DstValue->op_end()); Elts.insert(SrcValue->op_begin(), SrcValue->op_end()); @@ -1160,7 +1160,7 @@ // Check all of the requirements. for (unsigned I = 0, E = Requirements.size(); I != E; ++I) { MDNode *Requirement = Requirements[I]; - MDString *Flag = cast<MDString>(Requirement->getOperand(0)); + auto *Flag = cast<MDString>(Requirement->getOperand(0)); Metadata *ReqValue = Requirement->getOperand(1); MDNode *Op = Flags[Flag].first;
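The IRMover hunks repeatedly unpack module-flag nodes, whose operands are (behavior, ID, value); mdconst::extract bridges from Metadata back to ConstantInt. The unpacking step alone, as a sketch (flagID is a hypothetical helper):

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Metadata.h"

    // Pulls the behavior constant and the string ID out of one flag entry.
    llvm::StringRef flagID(const llvm::MDNode *Op, uint64_t &Behavior) {
      Behavior =
          llvm::mdconst::extract<llvm::ConstantInt>(Op->getOperand(0))
              ->getZExtValue();
      // cast<> (not dyn_cast<>) because a well-formed flag always has an ID.
      return llvm::cast<llvm::MDString>(Op->getOperand(1))->getString();
    }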
Index: lib/MC/ConstantPools.cpp =================================================================== --- lib/MC/ConstantPools.cpp +++ lib/MC/ConstantPools.cpp @@ -36,7 +36,7 @@ const MCExpr *ConstantPool::addEntry(const MCExpr *Value, MCContext &Context, unsigned Size, SMLoc Loc) { - const MCConstantExpr *C = dyn_cast<MCConstantExpr>(Value); + const auto *C = dyn_cast<MCConstantExpr>(Value); // Check if there is existing entry for the same constant. If so, reuse it. auto Itr = C ? CachedEntries.find(C->getValue()) : CachedEntries.end(); Index: lib/MC/ELFObjectWriter.cpp =================================================================== --- lib/MC/ELFObjectWriter.cpp +++ lib/MC/ELFObjectWriter.cpp @@ -460,7 +460,7 @@ uint32_t StringIndex, ELFSymbolData &MSD, const MCAsmLayout &Layout) { const auto &Symbol = cast<MCSymbolELF>(*MSD.Symbol); - const MCSymbolELF *Base = + const auto *Base = cast_or_null<MCSymbolELF>(Layout.getBaseSymbol(Symbol)); // This has to be in sync with when computeSymbolTable uses SHN_ABS or @@ -626,7 +626,7 @@ const MCFragment *Fragment, const MCFixup &Fixup, MCValue Target, bool &IsPCRel, uint64_t &FixedValue) { - const MCSectionELF &FixupSection = cast<MCSectionELF>(*Fragment->getParent()); + const auto &FixupSection = cast<MCSectionELF>(*Fragment->getParent()); uint64_t C = Target.getConstant(); uint64_t FixupOffset = Layout.getFragmentOffset(Fragment) + Fixup.getOffset(); MCContext &Ctx = Asm.getContext(); @@ -735,7 +735,7 @@ bool Renamed) { if (Symbol.isVariable()) { const MCExpr *Expr = Symbol.getVariableValue(); - if (const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Expr)) { + if (const auto *Ref = dyn_cast<MCSymbolRefExpr>(Expr)) { if (Ref->getKind() == MCSymbolRefExpr::VK_WEAKREF) return false; }
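On the MC side the recurring probe is a variable symbol's value expression; ELFObjectWriter's weakref test above is typical. The same check as a standalone sketch (isWeakref mirrors, but is not, the writer's private helper):

    #include "llvm/MC/MCExpr.h"
    #include "llvm/MC/MCSymbol.h"

    // True when Sym is a variable whose value is a VK_WEAKREF reference.
    bool isWeakref(const llvm::MCSymbol &Sym) {
      if (!Sym.isVariable())
        return false;
      if (const auto *Ref =
              llvm::dyn_cast<llvm::MCSymbolRefExpr>(Sym.getVariableValue()))
        return Ref->getKind() == llvm::MCSymbolRefExpr::VK_WEAKREF;
      return false;
    }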
Index: lib/MC/MCAssembler.cpp =================================================================== --- lib/MC/MCAssembler.cpp +++ lib/MC/MCAssembler.cpp @@ -117,7 +117,7 @@ // FIXME: It looks like gas supports some cases of the form "foo + 2". It // is not clear if that is a bug or a feature. const MCExpr *Expr = Symbol->getVariableValue(); - const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Expr); + const auto *Ref = dyn_cast<MCSymbolRefExpr>(Expr); if (!Ref) return false; @@ -261,7 +261,7 @@ return 4; case MCFragment::FT_Align: { - const MCAlignFragment &AF = cast<MCAlignFragment>(F); + const auto &AF = cast<MCAlignFragment>(F); unsigned Offset = Layout.getFragmentOffset(&AF); unsigned Size = OffsetToAlignment(Offset, AF.getAlignment()); // If we are padding with nops, force the padding to be larger than the @@ -276,7 +276,7 @@ } case MCFragment::FT_Org: { - const MCOrgFragment &OF = cast<MCOrgFragment>(F); + const auto &OF = cast<MCOrgFragment>(F); MCValue Value; if (!OF.getOffset().evaluateAsValue(Value, Layout)) report_fatal_error("expected assembly-time absolute expression"); @@ -437,7 +437,7 @@ switch (F.getKind()) { case MCFragment::FT_Align: { ++stats::EmittedAlignFragments; - const MCAlignFragment &AF = cast<MCAlignFragment>(F); + const auto &AF = cast<MCAlignFragment>(F); assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!"); uint64_t Count = FragmentSize / AF.getValueSize(); @@ -492,7 +492,7 @@ case MCFragment::FT_Fill: { ++stats::EmittedFillFragments; - const MCFillFragment &FF = cast<MCFillFragment>(F); + const auto &FF = cast<MCFillFragment>(F); uint8_t V = FF.getValue(); const unsigned MaxChunkSize = 16; char Data[MaxChunkSize]; @@ -511,20 +511,20 @@ } case MCFragment::FT_LEB: { - const MCLEBFragment &LF = cast<MCLEBFragment>(F); + const auto &LF = cast<MCLEBFragment>(F); OW->writeBytes(LF.getContents()); break; } case MCFragment::FT_SafeSEH: { - const MCSafeSEHFragment &SF = cast<MCSafeSEHFragment>(F); + const auto &SF = cast<MCSafeSEHFragment>(F); OW->write32(SF.getSymbol()->getIndex()); break; } case MCFragment::FT_Org: { ++stats::EmittedOrgFragments; - const MCOrgFragment &OF = cast<MCOrgFragment>(F); + const auto &OF = cast<MCOrgFragment>(F); for (uint64_t i = 0, e = FragmentSize; i != e; ++i) OW->write8(uint8_t(OF.getValue())); @@ -533,12 +533,12 @@ } case MCFragment::FT_Dwarf: { - const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F); + const auto &OF = cast<MCDwarfLineAddrFragment>(F); OW->writeBytes(OF.getContents()); break; } case MCFragment::FT_DwarfFrame: { - const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F); + const auto &CF = cast<MCDwarfCallFrameFragment>(F); OW->writeBytes(CF.getContents()); break; } @@ -574,7 +574,7 @@ // Check that we aren't trying to write a non-zero contents (or fixups) // into a virtual section. This is to support clients which use standard // directives to fill the contents of virtual sections. - const MCDataFragment &DF = cast<MCDataFragment>(F); + const auto &DF = cast<MCDataFragment>(F); if (DF.fixup_begin() != DF.fixup_end()) report_fatal_error("cannot have fixups in virtual section!"); for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i) Index: lib/MC/MCELFStreamer.cpp =================================================================== --- lib/MC/MCELFStreamer.cpp +++ lib/MC/MCELFStreamer.cpp @@ -391,7 +391,7 @@ break; case MCExpr::Binary: { - const MCBinaryExpr *be = cast<MCBinaryExpr>(expr); + const auto *be = cast<MCBinaryExpr>(expr); fixSymbolsInTLSFixups(be->getLHS()); fixSymbolsInTLSFixups(be->getRHS()); break; Index: lib/MC/MCExpr.cpp =================================================================== --- lib/MC/MCExpr.cpp +++ lib/MC/MCExpr.cpp @@ -39,7 +39,7 @@ return; case MCExpr::SymbolRef: { - const MCSymbolRefExpr &SRE = cast<MCSymbolRefExpr>(*this); + const auto &SRE = cast<MCSymbolRefExpr>(*this); const MCSymbol &Sym = SRE.getSymbol(); // Parenthesize names that start with $ so that they don't look like // absolute names.
@@ -59,7 +59,7 @@ } case MCExpr::Unary: { - const MCUnaryExpr &UE = cast<MCUnaryExpr>(*this); + const auto &UE = cast<MCUnaryExpr>(*this); switch (UE.getOpcode()) { case MCUnaryExpr::LNot: OS << '!'; break; case MCUnaryExpr::Minus: OS << '-'; break; @@ -71,7 +71,7 @@ } case MCExpr::Binary: { - const MCBinaryExpr &BE = cast<MCBinaryExpr>(*this); + const auto &BE = cast<MCBinaryExpr>(*this); // Only print parens around the LHS if it is non-trivial. if (isa<MCConstantExpr>(BE.getLHS()) || isa<MCSymbolRefExpr>(BE.getLHS())) { @@ -85,7 +85,7 @@ switch (BE.getOpcode()) { case MCBinaryExpr::Add: // Print "X-42" instead of "X+-42". - if (const MCConstantExpr *RHSC = dyn_cast<MCConstantExpr>(BE.getRHS())) { + if (const auto *RHSC = dyn_cast<MCConstantExpr>(BE.getRHS())) { if (RHSC->getValue() < 0) { OS << RHSC->getValue(); return; @@ -436,7 +436,7 @@ MCValue Value; // Fast path constants. - if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(this)) { + if (const auto *CE = dyn_cast<MCConstantExpr>(this)) { Res = CE->getValue(); return true; } @@ -622,7 +622,7 @@ return true; case SymbolRef: { - const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(this); + const auto *SRE = cast<MCSymbolRefExpr>(this); const MCSymbol &Sym = SRE->getSymbol(); // Evaluate recursively if this is a variable. @@ -652,7 +652,7 @@ } case Unary: { - const MCUnaryExpr *AUE = cast<MCUnaryExpr>(this); + const auto *AUE = cast<MCUnaryExpr>(this); MCValue Value; if (!AUE->getSubExpr()->evaluateAsRelocatableImpl(Value, Asm, Layout, Fixup, @@ -688,7 +688,7 @@ } case Binary: { - const MCBinaryExpr *ABE = cast<MCBinaryExpr>(this); + const auto *ABE = cast<MCBinaryExpr>(this); MCValue LHSValue, RHSValue; if (!ABE->getLHS()->evaluateAsRelocatableImpl(LHSValue, Asm, Layout, Fixup, @@ -772,7 +772,7 @@ return MCSymbol::AbsolutePseudoFragment; case SymbolRef: { - const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(this); + const auto *SRE = cast<MCSymbolRefExpr>(this); const MCSymbol &Sym = SRE->getSymbol(); return Sym.getFragment(); } @@ -781,7 +781,7 @@ return cast<MCUnaryExpr>(this)->getSubExpr()->findAssociatedFragment(); case Binary: { - const MCBinaryExpr *BE = cast<MCBinaryExpr>(this); + const auto *BE = cast<MCBinaryExpr>(this); MCFragment *LHS_F = BE->getLHS()->findAssociatedFragment(); MCFragment *RHS_F = BE->getRHS()->findAssociatedFragment();
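Every MCExpr.cpp hunk sits inside a switch on getKind() followed by a cast<> to the matching subclass; that structure is easiest to see in miniature (countNodes is illustrative, not an LLVM API):

    #include "llvm/MC/MCExpr.h"
    #include "llvm/Support/ErrorHandling.h"

    // Counts the nodes of an MC expression tree, one cast<> per kind.
    unsigned countNodes(const llvm::MCExpr *E) {
      switch (E->getKind()) {
      case llvm::MCExpr::Target:
      case llvm::MCExpr::Constant:
      case llvm::MCExpr::SymbolRef:
        return 1;
      case llvm::MCExpr::Unary:
        return 1 + countNodes(llvm::cast<llvm::MCUnaryExpr>(E)->getSubExpr());
      case llvm::MCExpr::Binary: {
        const auto *BE = llvm::cast<llvm::MCBinaryExpr>(E);
        return 1 + countNodes(BE->getLHS()) + countNodes(BE->getRHS());
      }
      }
      llvm_unreachable("unknown MCExpr kind");
    }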
Index: lib/MC/MCFragment.cpp =================================================================== --- lib/MC/MCFragment.cpp +++ lib/MC/MCFragment.cpp @@ -338,7 +338,7 @@ switch (getKind()) { case MCFragment::FT_Align: { - const MCAlignFragment *AF = cast<MCAlignFragment>(this); + const auto *AF = cast<MCAlignFragment>(this); if (AF->hasEmitNops()) OS << " (emit nops)"; OS << "\n "; @@ -348,7 +348,7 @@ break; } case MCFragment::FT_Data: { - const MCDataFragment *DF = cast<MCDataFragment>(this); + const auto *DF = cast<MCDataFragment>(this); OS << "\n "; OS << " Contents:["; const SmallVectorImpl<char> &Contents = DF->getContents(); @@ -371,7 +371,7 @@ break; } case MCFragment::FT_CompactEncodedInst: { - const MCCompactEncodedInstFragment *CEIF = + const auto *CEIF = cast<MCCompactEncodedInstFragment>(this); OS << "\n "; OS << " Contents:["; @@ -384,44 +384,44 @@ break; } case MCFragment::FT_Fill: { - const MCFillFragment *FF = cast<MCFillFragment>(this); + const auto *FF = cast<MCFillFragment>(this); OS << " Value:" << FF->getValue() << " Size:" << FF->getSize(); break; } case MCFragment::FT_Relaxable: { - const MCRelaxableFragment *F = cast<MCRelaxableFragment>(this); + const auto *F = cast<MCRelaxableFragment>(this); OS << "\n "; OS << " Inst:"; F->getInst().dump_pretty(OS); break; } case MCFragment::FT_Org: { - const MCOrgFragment *OF = cast<MCOrgFragment>(this); + const auto *OF = cast<MCOrgFragment>(this); OS << "\n "; OS << " Offset:" << OF->getOffset() << " Value:" << OF->getValue(); break; } case MCFragment::FT_Dwarf: { - const MCDwarfLineAddrFragment *OF = cast<MCDwarfLineAddrFragment>(this); + const auto *OF = cast<MCDwarfLineAddrFragment>(this); OS << "\n "; OS << " AddrDelta:" << OF->getAddrDelta() << " LineDelta:" << OF->getLineDelta(); break; } case MCFragment::FT_DwarfFrame: { - const MCDwarfCallFrameFragment *CF = cast<MCDwarfCallFrameFragment>(this); + const auto *CF = cast<MCDwarfCallFrameFragment>(this); OS << "\n "; OS << " AddrDelta:" << CF->getAddrDelta(); break; } case MCFragment::FT_LEB: { - const MCLEBFragment *LF = cast<MCLEBFragment>(this); + const auto *LF = cast<MCLEBFragment>(this); OS << "\n "; OS << " Value:" << LF->getValue() << " Signed:" << LF->isSigned(); break; } case MCFragment::FT_SafeSEH: { - const MCSafeSEHFragment *F = cast<MCSafeSEHFragment>(this); + const auto *F = cast<MCSafeSEHFragment>(this); OS << "\n "; OS << " Sym:" << F->getSymbol(); break; Index: lib/MC/MCMachOStreamer.cpp =================================================================== --- lib/MC/MCMachOStreamer.cpp +++ lib/MC/MCMachOStreamer.cpp @@ -286,7 +286,7 @@ bool MCMachOStreamer::EmitSymbolAttribute(MCSymbol *Sym, MCSymbolAttr Attribute) { - MCSymbolMachO *Symbol = cast<MCSymbolMachO>(Sym); + auto *Symbol = cast<MCSymbolMachO>(Sym); // Indirect symbols are handled differently, to match how 'as' handles // them. This makes writing matching .o files easier. Index: lib/MC/MCObjectStreamer.cpp =================================================================== --- lib/MC/MCObjectStreamer.cpp +++ lib/MC/MCObjectStreamer.cpp @@ -100,7 +100,7 @@ } MCDataFragment *MCObjectStreamer::getOrCreateDataFragment() { - MCDataFragment *F = dyn_cast_or_null<MCDataFragment>(getCurrentFragment()); + auto *F = dyn_cast_or_null<MCDataFragment>(getCurrentFragment()); // When bundling is enabled, we don't want to add data to a fragment that // already has instructions (see MCELFStreamer::EmitInstToData for details) if (!F || (Assembler->isBundlingEnabled() && !Assembler->getRelaxAll() && Index: lib/MC/MCParser/AsmParser.cpp =================================================================== --- lib/MC/MCParser/AsmParser.cpp +++ lib/MC/MCParser/AsmParser.cpp @@ -1145,7 +1145,7 @@ return nullptr; case MCExpr::SymbolRef: { - const MCSymbolRefExpr *SRE = cast<MCSymbolRefExpr>(E); + const auto *SRE = cast<MCSymbolRefExpr>(E); if (SRE->getKind() != MCSymbolRefExpr::VK_None) { TokError("invalid variant on expression '" + getTok().getIdentifier() + @@ -1157,7 +1157,7 @@ } case MCExpr::Unary: { - const MCUnaryExpr *UE = cast<MCUnaryExpr>(E); + const auto *UE = cast<MCUnaryExpr>(E); const MCExpr *Sub = applyModifierToExpr(UE->getSubExpr(), Variant); if (!Sub) return nullptr; @@ -1165,7 +1165,7 @@ } case MCExpr::Binary: { - const MCBinaryExpr *BE = cast<MCBinaryExpr>(E); + const auto *BE = cast<MCBinaryExpr>(E); const MCExpr *LHS = applyModifierToExpr(BE->getLHS(), Variant); const MCExpr *RHS = applyModifierToExpr(BE->getRHS(), Variant); @@ -2783,7 +2783,7 @@ if (checkForValidSection() || parseExpression(Value)) return true; // Special case constant expressions to match code generator. - if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value)) { + if (const auto *MCE = dyn_cast<MCConstantExpr>(Value)) { assert(Size <= 8 && "Invalid size"); uint64_t IntValue = MCE->getValue(); if (!isUIntN(8 * Size, IntValue) && !isIntN(8 * Size, IntValue)) @@ -3197,7 +3197,7 @@ if (parseExpression(Value)) return true; // The expression must be the constant 0 or 1. - if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value)) { + if (const auto *MCE = dyn_cast<MCConstantExpr>(Value)) { int Value = MCE->getValue(); if (Value == 0) Flags &= ~DWARF2_FLAG_IS_STMT; @@ -3214,7 +3214,7 @@ if (parseExpression(Value)) return true; // The expression must be a constant greater or equal to 0. - if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value)) { + if (const auto *MCE = dyn_cast<MCConstantExpr>(Value)) { int Value = MCE->getValue(); if (Value < 0) return Error(Loc, "isa number less than zero"); @@ -4232,7 +4232,7 @@ return true; // Special case constant expressions to match code generator.
-  if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value)) {
+  if (const auto *MCE = dyn_cast<MCConstantExpr>(Value)) {
     assert(Size <= 8 && "Invalid size");
     uint64_t IntValue = MCE->getValue();
     if (!isUIntN(8 * Size, IntValue) && !isIntN(8 * Size, IntValue))
@@ -3197,7 +3197,7 @@
   if (parseExpression(Value))
     return true;
   // The expression must be the constant 0 or 1.
-  if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value)) {
+  if (const auto *MCE = dyn_cast<MCConstantExpr>(Value)) {
     int Value = MCE->getValue();
     if (Value == 0)
       Flags &= ~DWARF2_FLAG_IS_STMT;
@@ -3214,7 +3214,7 @@
   if (parseExpression(Value))
     return true;
   // The expression must be a constant greater or equal to 0.
-  if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value)) {
+  if (const auto *MCE = dyn_cast<MCConstantExpr>(Value)) {
     int Value = MCE->getValue();
     if (Value < 0)
       return Error(Loc, "isa number less than zero");
@@ -4232,7 +4232,7 @@
     return true;
   // Special case constant expressions to match code generator.
-  if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value)) {
+  if (const auto *MCE = dyn_cast<MCConstantExpr>(Value)) {
     assert(Size <= 8 && "Invalid size");
     uint64_t IntValue = MCE->getValue();
     if (!isUIntN(8 * Size, IntValue) && !isIntN(8 * Size, IntValue))
@@ -5155,7 +5155,7 @@
     SMLoc ExprLoc = getLexer().getLoc();
     if (parseExpression(Value))
       return true;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value);
+    const auto *MCE = dyn_cast<MCConstantExpr>(Value);
     if (!MCE)
       return Error(ExprLoc, "unexpected expression in _emit");
     uint64_t IntValue = MCE->getValue();
@@ -5171,7 +5171,7 @@
     SMLoc ExprLoc = getLexer().getLoc();
     if (parseExpression(Value))
       return true;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Value);
+    const auto *MCE = dyn_cast<MCConstantExpr>(Value);
     if (!MCE)
       return Error(ExprLoc, "unexpected expression in align");
     uint64_t IntValue = MCE->getValue();
Index: lib/MC/MCParser/ELFAsmParser.cpp
===================================================================
--- lib/MC/MCParser/ELFAsmParser.cpp
+++ lib/MC/MCParser/ELFAsmParser.cpp
@@ -200,7 +200,7 @@
   StringRef Name;
   if (getParser().parseIdentifier(Name))
     return TokError("expected identifier in directive");
-  MCSymbolELF *Sym = cast<MCSymbolELF>(getContext().getOrCreateSymbol(Name));
+  auto *Sym = cast<MCSymbolELF>(getContext().getOrCreateSymbol(Name));
   if (getLexer().isNot(AsmToken::Comma))
     return TokError("unexpected token in directive");
@@ -513,7 +513,7 @@
   if (UseLastGroup) {
     MCSectionSubPair CurrentSection = getStreamer().getCurrentSection();
-    if (const MCSectionELF *Section =
+    if (const auto *Section =
             cast_or_null<MCSectionELF>(CurrentSection.first))
       if (const MCSymbol *Group = Section->getGroup()) {
         GroupName = Group->getName();
Index: lib/MC/MCStreamer.cpp
===================================================================
--- lib/MC/MCStreamer.cpp
+++ lib/MC/MCStreamer.cpp
@@ -747,7 +747,7 @@
     break;
   case MCExpr::Binary: {
-    const MCBinaryExpr &BE = cast<MCBinaryExpr>(Expr);
+    const auto &BE = cast<MCBinaryExpr>(Expr);
     visitUsedExpr(*BE.getLHS());
     visitUsedExpr(*BE.getRHS());
     break;
Index: lib/MC/MachObjectWriter.cpp
===================================================================
--- lib/MC/MachObjectWriter.cpp
+++ lib/MC/MachObjectWriter.cpp
@@ -74,7 +74,7 @@
                                           const MCAsmLayout &Layout) const {
   // If this is a variable, then recursively evaluate now.
   if (S.isVariable()) {
-    if (const MCConstantExpr *C =
+    if (const auto *C =
             dyn_cast<MCConstantExpr>(S.getVariableValue()))
       return C->getValue();
@@ -199,7 +199,7 @@
                                        uint64_t RelocationsStart,
                                        unsigned NumRelocations) {
   uint64_t SectionSize = Layout.getSectionAddressSize(&Sec);
-  const MCSectionMachO &Section = cast<MCSectionMachO>(Sec);
+  const auto &Section = cast<MCSectionMachO>(Sec);
   // The offset is unused for virtual sections.
   if (Section.isVirtualSection()) {
@@ -453,7 +453,7 @@
   // or stub section.
   for (MCAssembler::indirect_symbol_iterator it = Asm.indirect_symbol_begin(),
          ie = Asm.indirect_symbol_end(); it != ie; ++it) {
-    const MCSectionMachO &Section = cast<MCSectionMachO>(*it->Section);
+    const auto &Section = cast<MCSectionMachO>(*it->Section);
     if (Section.getType() != MachO::S_NON_LAZY_SYMBOL_POINTERS &&
         Section.getType() != MachO::S_LAZY_SYMBOL_POINTERS &&
@@ -469,7 +469,7 @@
   unsigned IndirectIndex = 0;
   for (MCAssembler::indirect_symbol_iterator it = Asm.indirect_symbol_begin(),
          ie = Asm.indirect_symbol_end(); it != ie; ++it, ++IndirectIndex) {
-    const MCSectionMachO &Section = cast<MCSectionMachO>(*it->Section);
+    const auto &Section = cast<MCSectionMachO>(*it->Section);
     if (Section.getType() != MachO::S_NON_LAZY_SYMBOL_POINTERS &&
         Section.getType() != MachO::S_THREAD_LOCAL_VARIABLE_POINTERS)
@@ -485,7 +485,7 @@
   IndirectIndex = 0;
   for (MCAssembler::indirect_symbol_iterator it = Asm.indirect_symbol_begin(),
          ie = Asm.indirect_symbol_end(); it != ie; ++it, ++IndirectIndex) {
-    const MCSectionMachO &Section = cast<MCSectionMachO>(*it->Section);
+    const auto &Section = cast<MCSectionMachO>(*it->Section);
     if (Section.getType() != MachO::S_LAZY_SYMBOL_POINTERS &&
         Section.getType() != MachO::S_SYMBOL_STUBS)
Index: lib/MC/WinCOFFObjectWriter.cpp
===================================================================
--- lib/MC/WinCOFFObjectWriter.cpp
+++ lib/MC/WinCOFFObjectWriter.cpp
@@ -355,7 +355,7 @@
   if (!Symbol.isVariable())
     return nullptr;
-  const MCSymbolRefExpr *SymRef =
+  const auto *SymRef =
       dyn_cast<MCSymbolRefExpr>(Symbol.getVariableValue());
   if (!SymRef)
     return nullptr;
@@ -415,7 +415,7 @@
   if (Local) {
     Local->Data.Value = getSymbolValue(Symbol, Layout);
-    const MCSymbolCOFF &SymbolCOFF = cast<MCSymbolCOFF>(Symbol);
+    const auto &SymbolCOFF = cast<MCSymbolCOFF>(Symbol);
     Local->Data.Type = SymbolCOFF.getType();
     Local->Data.StorageClass = SymbolCOFF.getClass();
Index: lib/MC/WinCOFFStreamer.cpp
===================================================================
--- lib/MC/WinCOFFStreamer.cpp
+++ lib/MC/WinCOFFStreamer.cpp
@@ -167,7 +167,7 @@
       Triple::x86)
     return;
-  const MCSymbolCOFF *CSymbol = cast<MCSymbolCOFF>(Symbol);
+  const auto *CSymbol = cast<MCSymbolCOFF>(Symbol);
   if (CSymbol->isSafeSEH())
     return;
Index: lib/Object/ModuleSymbolTable.cpp
===================================================================
--- lib/Object/ModuleSymbolTable.cpp
+++ lib/Object/ModuleSymbolTable.cpp
@@ -160,7 +160,7 @@
     Res |= BasicSymbolRef::SF_Undefined;
   else if (GV->hasHiddenVisibility() && !GV->hasLocalLinkage())
     Res |= BasicSymbolRef::SF_Hidden;
-  if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) {
+  if (const auto *GVar = dyn_cast<GlobalVariable>(GV)) {
     if (GVar->isConstant())
       Res |= BasicSymbolRef::SF_Const;
   }
Index: lib/ProfileData/InstrProf.cpp
===================================================================
--- lib/ProfileData/InstrProf.cpp
+++ lib/ProfileData/InstrProf.cpp
@@ -733,7 +733,7 @@
     return false;
   // Operand 0 is a string tag "VP":
-  MDString *Tag = cast<MDString>(MD->getOperand(0));
+  auto *Tag = cast<MDString>(MD->getOperand(0));
   if (!Tag)
     return false;
Index: lib/Support/YAMLTraits.cpp
===================================================================
--- lib/Support/YAMLTraits.cpp
+++ lib/Support/YAMLTraits.cpp
@@ -112,7 +112,7 @@
   if (EC)
     return;
   // CurrentNode can be null if the document is empty.
-  MapHNode *MN = dyn_cast_or_null<MapHNode>(CurrentNode);
+  auto *MN = dyn_cast_or_null<MapHNode>(CurrentNode);
   if (MN) {
     MN->ValidKeys.clear();
   }
@@ -132,7 +132,7 @@
     return false;
   }
-  MapHNode *MN = dyn_cast<MapHNode>(CurrentNode);
+  auto *MN = dyn_cast<MapHNode>(CurrentNode);
   if (!MN) {
     setError(CurrentNode, "not a mapping");
     return false;
@@ -159,7 +159,7 @@
   if (EC)
     return;
   // CurrentNode can be null if the document is empty.
-  MapHNode *MN = dyn_cast_or_null<MapHNode>(CurrentNode);
+  auto *MN = dyn_cast_or_null<MapHNode>(CurrentNode);
   if (!MN)
     return;
   for (const auto &NN : MN->Mapping) {
@@ -175,12 +175,12 @@
 void Input::endFlowMapping() { endMapping(); }
 unsigned Input::beginSequence() {
-  if (SequenceHNode *SQ = dyn_cast<SequenceHNode>(CurrentNode))
+  if (auto *SQ = dyn_cast<SequenceHNode>(CurrentNode))
     return SQ->Entries.size();
   if (isa<EmptyHNode>(CurrentNode))
     return 0;
   // Treat case where there's a scalar "null" value as an empty sequence.
-  if (ScalarHNode *SN = dyn_cast<ScalarHNode>(CurrentNode)) {
+  if (auto *SN = dyn_cast<ScalarHNode>(CurrentNode)) {
     if (isNull(SN->value()))
       return 0;
   }
@@ -195,7 +195,7 @@
 bool Input::preflightElement(unsigned Index, void *&SaveInfo) {
   if (EC)
     return false;
-  if (SequenceHNode *SQ = dyn_cast<SequenceHNode>(CurrentNode)) {
+  if (auto *SQ = dyn_cast<SequenceHNode>(CurrentNode)) {
     SaveInfo = CurrentNode;
     CurrentNode = SQ->Entries[Index].get();
     return true;
@@ -212,7 +212,7 @@
 bool Input::preflightFlowElement(unsigned index, void *&SaveInfo) {
   if (EC)
     return false;
-  if (SequenceHNode *SQ = dyn_cast<SequenceHNode>(CurrentNode)) {
+  if (auto *SQ = dyn_cast<SequenceHNode>(CurrentNode)) {
     SaveInfo = CurrentNode;
     CurrentNode = SQ->Entries[index].get();
     return true;
@@ -234,7 +234,7 @@
 bool Input::matchEnumScalar(const char *Str, bool) {
   if (ScalarMatchFound)
     return false;
-  if (ScalarHNode *SN = dyn_cast<ScalarHNode>(CurrentNode)) {
+  if (auto *SN = dyn_cast<ScalarHNode>(CurrentNode)) {
     if (SN->value().equals(Str)) {
       ScalarMatchFound = true;
       return true;
@@ -258,7 +258,7 @@
 bool Input::beginBitSetScalar(bool &DoClear) {
   BitValuesUsed.clear();
-  if (SequenceHNode *SQ = dyn_cast<SequenceHNode>(CurrentNode)) {
+  if (auto *SQ = dyn_cast<SequenceHNode>(CurrentNode)) {
     BitValuesUsed.insert(BitValuesUsed.begin(), SQ->Entries.size(), false);
   } else {
     setError(CurrentNode, "expected sequence of bit values");
@@ -270,10 +270,10 @@
 bool Input::bitSetMatch(const char *Str, bool) {
   if (EC)
     return false;
-  if (SequenceHNode *SQ = dyn_cast<SequenceHNode>(CurrentNode)) {
+  if (auto *SQ = dyn_cast<SequenceHNode>(CurrentNode)) {
     unsigned Index = 0;
     for (auto &N : SQ->Entries) {
-      if (ScalarHNode *SN = dyn_cast<ScalarHNode>(N.get())) {
+      if (auto *SN = dyn_cast<ScalarHNode>(N.get())) {
        if (SN->value().equals(Str)) {
          BitValuesUsed[Index] = true;
          return true;
@@ -292,7 +292,7 @@
 void Input::endBitSetScalar() {
   if (EC)
     return;
-  if (SequenceHNode *SQ = dyn_cast<SequenceHNode>(CurrentNode)) {
+  if (auto *SQ = dyn_cast<SequenceHNode>(CurrentNode)) {
     assert(BitValuesUsed.size() == SQ->Entries.size());
     for (unsigned i = 0; i < SQ->Entries.size(); ++i) {
       if (!BitValuesUsed[i]) {
@@ -304,7 +304,7 @@
 }
 void Input::scalarString(StringRef &S, bool) {
-  if (ScalarHNode *SN = dyn_cast<ScalarHNode>(CurrentNode)) {
+  if (auto *SN = dyn_cast<ScalarHNode>(CurrentNode)) {
     S = SN->value();
   } else {
     setError(CurrentNode, "unexpected scalar");
@@ -325,17 +325,17 @@
 std::unique_ptr<Input::HNode> Input::createHNodes(Node *N) {
   SmallString<128> StringStorage;
-  if (ScalarNode *SN = dyn_cast<ScalarNode>(N)) {
+  if (auto *SN = dyn_cast<ScalarNode>(N)) {
     StringRef KeyStr = SN->getValue(StringStorage);
     if (!StringStorage.empty()) {
       // Copy string to permanent storage
       KeyStr = StringStorage.str().copy(StringAllocator);
     }
     return llvm::make_unique<ScalarHNode>(N, KeyStr);
-  } else if (BlockScalarNode *BSN = dyn_cast<BlockScalarNode>(N)) {
+  } else if (auto *BSN = dyn_cast<BlockScalarNode>(N)) {
     StringRef ValueCopy = BSN->getValue().copy(StringAllocator);
     return llvm::make_unique<ScalarHNode>(N, ValueCopy);
-  } else if (SequenceNode *SQ = dyn_cast<SequenceNode>(N)) {
+  } else if (auto *SQ = dyn_cast<SequenceNode>(N)) {
     auto SQHNode = llvm::make_unique<SequenceHNode>(N);
     for (Node &SN : *SQ) {
       auto Entry = this->createHNodes(&SN);
@@ -344,11 +344,11 @@
       SQHNode->Entries.push_back(std::move(Entry));
     }
     return std::move(SQHNode);
-  } else if (MappingNode *Map = dyn_cast<MappingNode>(N)) {
+  } else if (auto *Map = dyn_cast<MappingNode>(N)) {
     auto mapHNode = llvm::make_unique<MapHNode>(N);
     for (KeyValueNode &KVN : *Map) {
       Node *KeyNode = KVN.getKey();
-      ScalarNode *KeyScalar = dyn_cast<ScalarNode>(KeyNode);
+      auto *KeyScalar = dyn_cast<ScalarNode>(KeyNode);
       if (!KeyScalar) {
         setError(KeyNode, "Map key must be a scalar");
         break;
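Note that the YAML reader deliberately uses dyn_cast_or_null rather than dyn_cast: CurrentNode can be null for an empty document, and dyn_cast requires a non-null argument. A short sketch of the distinction, reusing the hypothetical Shape/Circle declarations from the earlier sketch:

#include "llvm/Support/Casting.h"

// Assumes the Shape/Circle declarations from the first sketch.
double areaOfPossiblyMissingShape(const Shape *S /* may be null */) {
  // dyn_cast asserts on a null argument; dyn_cast_or_null simply
  // propagates null, which is the behavior the mapping code above
  // relies on when the document is empty.
  if (const auto *C = llvm::dyn_cast_or_null<Circle>(S))
    return 3.14159 * C->Radius * C->Radius;
  return 0.0;
}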
Index: lib/TableGen/Record.cpp
===================================================================
--- lib/TableGen/Record.cpp
+++ lib/TableGen/Record.cpp
@@ -56,7 +56,7 @@
 bool BitRecTy::typeIsConvertibleTo(const RecTy *RHS) const {
   if (RecTy::typeIsConvertibleTo(RHS) || RHS->getRecTyKind() == IntRecTyKind)
     return true;
-  if (const BitsRecTy *BitsTy = dyn_cast<BitsRecTy>(RHS))
+  if (const auto *BitsTy = dyn_cast<BitsRecTy>(RHS))
     return BitsTy->getNumBits() == 1;
   return false;
 }
@@ -114,7 +114,7 @@
 }
 bool RecordRecTy::typeIsConvertibleTo(const RecTy *RHS) const {
-  const RecordRecTy *RTy = dyn_cast<RecordRecTy>(RHS);
+  const auto *RTy = dyn_cast<RecordRecTy>(RHS);
   if (!RTy)
     return false;
@@ -135,7 +135,7 @@
     return T1;
   // If one is a Record type, check superclasses
-  if (RecordRecTy *RecTy1 = dyn_cast<RecordRecTy>(T1)) {
+  if (auto *RecTy1 = dyn_cast<RecordRecTy>(T1)) {
     // See if T2 inherits from a type T1 also inherits from
     for (const auto &SuperPair1 : RecTy1->getRecord()->getSuperClasses()) {
       RecordRecTy *SuperRecTy1 = RecordRecTy::get(SuperPair1.first);
@@ -144,7 +144,7 @@
       return NewType1;
     }
   }
-  if (RecordRecTy *RecTy2 = dyn_cast<RecordRecTy>(T2)) {
+  if (auto *RecTy2 = dyn_cast<RecordRecTy>(T2)) {
     // See if T1 inherits from a type T2 also inherits from
     for (const auto &SuperPair2 : RecTy2->getRecord()->getSuperClasses()) {
       RecordRecTy *SuperRecTy2 = RecordRecTy::get(SuperPair2.first);
@@ -518,7 +518,7 @@
 Record *ListInit::getElementAsRecord(unsigned i) const {
   assert(i < NumValues && "List element index out of range!");
-  DefInit *DI = dyn_cast<DefInit>(getElement(i));
+  auto *DI = dyn_cast<DefInit>(getElement(i));
   if (!DI)
     PrintFatalError("Expected record in list!");
   return DI->getDef();
@@ -572,13 +572,13 @@
 Init *OpInit::resolveListElementReference(Record &R, const RecordVal *IRV,
                                           unsigned Elt) const {
   Init *Resolved = resolveReferences(R, IRV);
-  OpInit *OResolved = dyn_cast<OpInit>(Resolved);
+  auto *OResolved = dyn_cast<OpInit>(Resolved);
   if (OResolved) {
     Resolved = OResolved->Fold(&R, nullptr);
   }
   if (Resolved != this) {
-    TypedInit *Typed = cast<TypedInit>(Resolved);
+    auto *Typed = cast<TypedInit>(Resolved);
     if (Init *New = Typed->resolveListElementReference(R, IRV, Elt))
       return New;
     return VarListElementInit::get(Typed, Elt);
@@ -625,16 +625,16 @@
   switch (getOpcode()) {
   case CAST: {
     if (isa<StringRecTy>(getType())) {
-      if (StringInit *LHSs = dyn_cast<StringInit>(LHS))
+      if (auto *LHSs = dyn_cast<StringInit>(LHS))
         return LHSs;
-      if (DefInit *LHSd = dyn_cast<DefInit>(LHS))
+      if (auto *LHSd = dyn_cast<DefInit>(LHS))
         return StringInit::get(LHSd->getAsString());
-      if (IntInit *LHSi = dyn_cast<IntInit>(LHS))
+      if (auto *LHSi = dyn_cast<IntInit>(LHS))
         return StringInit::get(LHSi->getAsString());
     } else {
-      if (StringInit *Name = dyn_cast<StringInit>(LHS)) {
+      if (auto *Name = dyn_cast<StringInit>(LHS)) {
         // From TGParser::ParseIDValue
         if (CurRec) {
           if (const RecordVal *RV = CurRec->getValue(Name)) {
@@ -680,7 +680,7 @@
     }
     if (isa<IntRecTy>(getType())) {
-      if (BitsInit *BI = dyn_cast<BitsInit>(LHS)) {
+      if (auto *BI = dyn_cast<BitsInit>(LHS)) {
         if (Init *NewInit = BI->convertInitializerTo(IntRecTy::get()))
           return NewInit;
         break;
@@ -690,14 +690,14 @@
     break;
   }
   case HEAD: {
-    if (ListInit *LHSl = dyn_cast<ListInit>(LHS)) {
+    if (auto *LHSl = dyn_cast<ListInit>(LHS)) {
       assert(!LHSl->empty() && "Empty list in head");
       return LHSl->getElement(0);
     }
     break;
   }
   case TAIL: {
-    if (ListInit *LHSl = dyn_cast<ListInit>(LHS)) {
+    if (auto *LHSl = dyn_cast<ListInit>(LHS)) {
       assert(!LHSl->empty() && "Empty list in tail");
       // Note the +1.  We can't just pass the result of getValues()
       // directly.
@@ -706,9 +706,9 @@
     break;
   }
   case EMPTY: {
-    if (ListInit *LHSl = dyn_cast<ListInit>(LHS))
+    if (auto *LHSl = dyn_cast<ListInit>(LHS))
       return IntInit::get(LHSl->empty());
-    if (StringInit *LHSs = dyn_cast<StringInit>(LHS))
+    if (auto *LHSs = dyn_cast<StringInit>(LHS))
       return IntInit::get(LHSs->getValue().empty());
     break;
@@ -777,11 +777,11 @@
 Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
   switch (getOpcode()) {
   case CONCAT: {
-    DagInit *LHSs = dyn_cast<DagInit>(LHS);
-    DagInit *RHSs = dyn_cast<DagInit>(RHS);
+    auto *LHSs = dyn_cast<DagInit>(LHS);
+    auto *RHSs = dyn_cast<DagInit>(RHS);
     if (LHSs && RHSs) {
-      DefInit *LOp = dyn_cast<DefInit>(LHSs->getOperator());
-      DefInit *ROp = dyn_cast<DefInit>(RHSs->getOperator());
+      auto *LOp = dyn_cast<DefInit>(LHSs->getOperator());
+      auto *ROp = dyn_cast<DefInit>(RHSs->getOperator());
       if (!LOp || !ROp || LOp->getDef() != ROp->getDef())
         PrintFatalError("Concated Dag operators do not match!");
       SmallVector<Init *, 8> Args;
@@ -799,8 +799,8 @@
     break;
   }
   case LISTCONCAT: {
-    ListInit *LHSs = dyn_cast<ListInit>(LHS);
-    ListInit *RHSs = dyn_cast<ListInit>(RHS);
+    auto *LHSs = dyn_cast<ListInit>(LHS);
+    auto *RHSs = dyn_cast<ListInit>(RHS);
     if (LHSs && RHSs) {
       SmallVector<Init *, 8> Args;
       Args.insert(Args.end(), LHSs->begin(), LHSs->end());
@@ -811,8 +811,8 @@
     break;
   }
   case STRCONCAT: {
-    StringInit *LHSs = dyn_cast<StringInit>(LHS);
-    StringInit *RHSs = dyn_cast<StringInit>(RHS);
+    auto *LHSs = dyn_cast<StringInit>(LHS);
+    auto *RHSs = dyn_cast<StringInit>(RHS);
     if (LHSs && RHSs)
       return ConcatStringInits(LHSs, RHSs);
     break;
@@ -820,16 +820,16 @@
   case EQ: {
     // try to fold eq comparison for 'bit' and 'int', otherwise fallback
     // to string objects.
-    IntInit *L =
+    auto *L =
         dyn_cast_or_null<IntInit>(LHS->convertInitializerTo(IntRecTy::get()));
-    IntInit *R =
+    auto *R =
         dyn_cast_or_null<IntInit>(RHS->convertInitializerTo(IntRecTy::get()));
     if (L && R)
       return IntInit::get(L->getValue() == R->getValue());
-    StringInit *LHSs = dyn_cast<StringInit>(LHS);
-    StringInit *RHSs = dyn_cast<StringInit>(RHS);
+    auto *LHSs = dyn_cast<StringInit>(LHS);
+    auto *RHSs = dyn_cast<StringInit>(RHS);
     // Make sure we've resolved
     if (LHSs && RHSs)
@@ -843,9 +843,9 @@
   case SHL:
   case SRA:
   case SRL: {
-    IntInit *LHSi =
+    auto *LHSi =
         dyn_cast_or_null<IntInit>(LHS->convertInitializerTo(IntRecTy::get()));
-    IntInit *RHSi =
+    auto *RHSi =
         dyn_cast_or_null<IntInit>(RHS->convertInitializerTo(IntRecTy::get()));
     if (LHSi && RHSi) {
       int64_t LHSv = LHSi->getValue(), RHSv = RHSi->getValue();
@@ -961,17 +961,17 @@
 static Init *ForeachHelper(Init *LHS, Init *MHS, Init *RHS, RecTy *Type,
                            Record *CurRec, MultiClass *CurMultiClass) {
-  OpInit *RHSo = dyn_cast<OpInit>(RHS);
+  auto *RHSo = dyn_cast<OpInit>(RHS);
   if (!RHSo)
     PrintFatalError(CurRec->getLoc(), "!foreach requires an operator\n");
-  TypedInit *LHSt = dyn_cast<TypedInit>(LHS);
+  auto *LHSt = dyn_cast<TypedInit>(LHS);
   if (!LHSt)
     PrintFatalError(CurRec->getLoc(), "!foreach requires typed variable\n");
-  DagInit *MHSd = dyn_cast<DagInit>(MHS);
+  auto *MHSd = dyn_cast<DagInit>(MHS);
   if (MHSd && isa<DagRecTy>(Type)) {
     Init *Val = MHSd->getOperator();
     if (Init *Result = EvaluateOperation(RHSo, LHS, Val,
@@ -995,7 +995,7 @@
     return DagInit::get(Val, nullptr, args);
   }
-  ListInit *MHSl = dyn_cast<ListInit>(MHS);
+  auto *MHSl = dyn_cast<ListInit>(MHS);
   if (MHSl && isa<ListRecTy>(Type)) {
     SmallVector<Init *, 8> NewOperands;
     SmallVector<Init *, 8> NewList(MHSl->begin(), MHSl->end());
@@ -1024,17 +1024,17 @@
 Init *TernOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const {
   switch (getOpcode()) {
   case SUBST: {
-    DefInit *LHSd = dyn_cast<DefInit>(LHS);
-    VarInit *LHSv = dyn_cast<VarInit>(LHS);
-    StringInit *LHSs = dyn_cast<StringInit>(LHS);
+    auto *LHSd = dyn_cast<DefInit>(LHS);
+    auto *LHSv = dyn_cast<VarInit>(LHS);
+    auto *LHSs = dyn_cast<StringInit>(LHS);
-    DefInit *MHSd = dyn_cast<DefInit>(MHS);
-    VarInit *MHSv = dyn_cast<VarInit>(MHS);
-    StringInit *MHSs = dyn_cast<StringInit>(MHS);
+    auto *MHSd = dyn_cast<DefInit>(MHS);
+    auto *MHSv = dyn_cast<VarInit>(MHS);
+    auto *MHSs = dyn_cast<StringInit>(MHS);
-    DefInit *RHSd = dyn_cast<DefInit>(RHS);
-    VarInit *RHSv = dyn_cast<VarInit>(RHS);
-    StringInit *RHSs = dyn_cast<StringInit>(RHS);
+    auto *RHSd = dyn_cast<DefInit>(RHS);
+    auto *RHSv = dyn_cast<VarInit>(RHS);
+    auto *RHSs = dyn_cast<StringInit>(RHS);
     if (LHSd && MHSd && RHSd) {
       Record *Val = RHSd->getDef();
@@ -1074,7 +1074,7 @@
   }
   case IF: {
-    IntInit *LHSi = dyn_cast<IntInit>(LHS);
+    auto *LHSi = dyn_cast<IntInit>(LHS);
     if (Init *I = LHS->convertInitializerTo(IntRecTy::get()))
       LHSi = dyn_cast<IntInit>(I);
     if (LHSi) {
@@ -1094,7 +1094,7 @@
   Init *lhs = LHS->resolveReferences(R, RV);
   if (getOpcode() == IF && lhs != LHS) {
-    IntInit *Value = dyn_cast<IntInit>(lhs);
+    auto *Value = dyn_cast<IntInit>(lhs);
     if (Init *I = lhs->convertInitializerTo(IntRecTy::get()))
       Value = dyn_cast<IntInit>(I);
     if (Value) {
@@ -1131,7 +1131,7 @@
 }
 RecTy *TypedInit::getFieldType(StringInit *FieldName) const {
-  if (RecordRecTy *RecordType = dyn_cast<RecordRecTy>(getType()))
+  if (auto *RecordType = dyn_cast<RecordRecTy>(getType()))
     if (RecordVal *Field = RecordType->getRecord()->getValue(FieldName))
       return Field->getType();
   return nullptr;
@@ -1209,7 +1209,7 @@
   if (auto *SRRT = dyn_cast<RecordRecTy>(Ty)) {
     // Ensure that this is compatible with Rec.
-    if (RecordRecTy *DRRT = dyn_cast<RecordRecTy>(getType()))
+    if (auto *DRRT = dyn_cast<RecordRecTy>(getType()))
       if (DRRT->getRecord()->isSubClassOf(SRRT->getRecord()) ||
           DRRT->getRecord() == SRRT->getRecord())
         return const_cast<TypedInit *>(this);
@@ -1220,7 +1220,7 @@
 }
 Init *TypedInit::convertInitializerBitRange(ArrayRef<unsigned> Bits) const {
-  BitsRecTy *T = dyn_cast<BitsRecTy>(getType());
+  auto *T = dyn_cast<BitsRecTy>(getType());
   if (!T) return nullptr;  // Cannot subscript a non-bits variable.
   unsigned NumBits = T->getNumBits();
@@ -1236,7 +1236,7 @@
 }
 Init *TypedInit::convertInitListSlice(ArrayRef<unsigned> Elements) const {
-  ListRecTy *T = dyn_cast<ListRecTy>(getType());
+  auto *T = dyn_cast<ListRecTy>(getType());
   if (!T) return nullptr;  // Cannot subscript a non-list variable.
   if (Elements.size() == 1)
@@ -1269,7 +1269,7 @@
 }
 StringRef VarInit::getName() const {
-  StringInit *NameString = cast<StringInit>(getNameInit());
+  auto *NameString = cast<StringInit>(getNameInit());
   return NameString->getValue();
 }
@@ -1287,7 +1287,7 @@
   RecordVal *RV = R.getValue(getNameInit());
   assert(RV && "Reference to a non-existent variable?");
-  ListInit *LI = dyn_cast<ListInit>(RV->getValue());
+  auto *LI = dyn_cast<ListInit>(RV->getValue());
   if (!LI)
     return VarListElementInit::get(cast<TypedInit>(RV->getValue()), Elt);
@@ -1303,7 +1303,7 @@
 }
 RecTy *VarInit::getFieldType(StringInit *FieldName) const {
-  if (RecordRecTy *RTy = dyn_cast<RecordRecTy>(getType()))
+  if (auto *RTy = dyn_cast<RecordRecTy>(getType()))
     if (const RecordVal *RV = RTy->getRecord()->getValue(FieldName))
       return RV->getType();
   return nullptr;
@@ -1396,7 +1396,7 @@
                                                 const RecordVal *RV,
                                                 unsigned Elt) const {
   if (Init *Result = TI->resolveListElementReference(R, RV, Element)) {
-    if (TypedInit *TInit = dyn_cast<TypedInit>(Result)) {
+    if (auto *TInit = dyn_cast<TypedInit>(Result)) {
       if (Init *Result2 = TInit->resolveListElementReference(R, RV, Elt))
         return Result2;
       return VarListElementInit::get(TInit, Elt);
@@ -1453,7 +1453,7 @@
 Init *FieldInit::resolveListElementReference(Record &R, const RecordVal *RV,
                                              unsigned Elt) const {
   if (Init *ListVal = Rec->getFieldInit(R, RV, FieldName))
-    if (ListInit *LI = dyn_cast<ListInit>(ListVal)) {
+    if (auto *LI = dyn_cast<ListInit>(ListVal)) {
       if (Elt >= LI->size())
         return nullptr;
       Init *E = LI->getElement(Elt);
@@ -1616,7 +1616,7 @@
 void Record::checkName() {
   // Ensure the record name has string type.
-  const TypedInit *TypedName = cast<TypedInit>(Name);
+  const auto *TypedName = cast<TypedInit>(Name);
   if (!isa<StringRecTy>(TypedName->getType()))
     PrintFatalError(getLoc(), "Record name is not a string!");
 }
@@ -1725,9 +1725,9 @@
     PrintFatalError(getLoc(), "Record `" + getName() +
       "' does not have a field named `" + FieldName + "'!\n");
-  if (StringInit *SI = dyn_cast<StringInit>(R->getValue()))
+  if (auto *SI = dyn_cast<StringInit>(R->getValue()))
     return SI->getValue();
-  if (CodeInit *CI = dyn_cast<CodeInit>(R->getValue()))
+  if (auto *CI = dyn_cast<CodeInit>(R->getValue()))
     return CI->getValue();
   PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
@@ -1740,7 +1740,7 @@
     PrintFatalError(getLoc(), "Record `" + getName() +
       "' does not have a field named `" + FieldName + "'!\n");
-  if (BitsInit *BI = dyn_cast<BitsInit>(R->getValue()))
+  if (auto *BI = dyn_cast<BitsInit>(R->getValue()))
     return BI;
   PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
                   FieldName + "' does not have a BitsInit initializer!");
@@ -1752,7 +1752,7 @@
     PrintFatalError(getLoc(), "Record `" + getName() +
       "' does not have a field named `" + FieldName + "'!\n");
-  if (ListInit *LI = dyn_cast<ListInit>(R->getValue()))
+  if (auto *LI = dyn_cast<ListInit>(R->getValue()))
     return LI;
   PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
                   FieldName + "' does not have a list initializer!");
@@ -1763,7 +1763,7 @@
   ListInit *List = getValueAsListInit(FieldName);
   std::vector<Record *> Defs;
   for (Init *I : List->getValues()) {
-    if (DefInit *DI = dyn_cast<DefInit>(I))
+    if (auto *DI = dyn_cast<DefInit>(I))
      Defs.push_back(DI->getDef());
    else
      PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
@@ -1778,7 +1778,7 @@
     PrintFatalError(getLoc(), "Record `" + getName() +
       "' does not have a field named `" + FieldName + "'!\n");
-  if (IntInit *II = dyn_cast<IntInit>(R->getValue()))
+  if (auto *II = dyn_cast<IntInit>(R->getValue()))
     return II->getValue();
   PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
                   FieldName + "' does not have an int initializer!");
@@ -1789,7 +1789,7 @@
   ListInit *List = getValueAsListInit(FieldName);
   std::vector<int64_t> Ints;
   for (Init *I : List->getValues()) {
-    if (IntInit *II = dyn_cast<IntInit>(I))
+    if (auto *II = dyn_cast<IntInit>(I))
       Ints.push_back(II->getValue());
     else
       PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
@@ -1803,7 +1803,7 @@
   ListInit *List = getValueAsListInit(FieldName);
   std::vector<StringRef> Strings;
   for (Init *I : List->getValues()) {
-    if (StringInit *SI = dyn_cast<StringInit>(I))
+    if (auto *SI = dyn_cast<StringInit>(I))
       Strings.push_back(SI->getValue());
     else
       PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
@@ -1818,7 +1818,7 @@
     PrintFatalError(getLoc(), "Record `" + getName() +
       "' does not have a field named `" + FieldName + "'!\n");
-  if (DefInit *DI = dyn_cast<DefInit>(R->getValue()))
+  if (auto *DI = dyn_cast<DefInit>(R->getValue()))
     return DI->getDef();
   PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
                   FieldName + "' does not have a def initializer!");
@@ -1830,7 +1830,7 @@
     PrintFatalError(getLoc(), "Record `" + getName() +
       "' does not have a field named `" + FieldName + "'!\n");
-  if (BitInit *BI = dyn_cast<BitInit>(R->getValue()))
+  if (auto *BI = dyn_cast<BitInit>(R->getValue()))
     return BI->getValue();
   PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
                   FieldName + "' does not have a bit initializer!");
@@ -1847,7 +1847,7 @@
     return false;
   }
   Unset = false;
-  if (BitInit *BI = dyn_cast<BitInit>(R->getValue()))
+  if (auto *BI = dyn_cast<BitInit>(R->getValue()))
     return BI->getValue();
   PrintFatalError(getLoc(), "Record `" + getName() + "', field `" +
                   FieldName + "' does not have a bit initializer!");
@@ -1859,7 +1859,7 @@
PrintFatalError(getLoc(), "Record `" + getName() + "' does not have a field named `" + FieldName + "'!\n"); - if (DagInit *DI = dyn_cast(R->getValue())) + if (auto *DI = dyn_cast(R->getValue())) return DI; PrintFatalError(getLoc(), "Record `" + getName() + "', field `" + FieldName + "' does not have a dag initializer!"); @@ -1903,8 +1903,8 @@ static Init *GetStrConcat(Init *I0, Init *I1) { // Shortcut for the common case of concatenating two strings. - if (const StringInit *I0s = dyn_cast(I0)) - if (const StringInit *I1s = dyn_cast(I1)) + if (const auto *I0s = dyn_cast(I0)) + if (const auto *I1s = dyn_cast(I1)) return ConcatStringInits(I0s, I1s); return BinOpInit::get(BinOpInit::STRCONCAT, I0, I1, StringRecTy::get()); } @@ -1919,7 +1919,7 @@ NewName = GetStrConcat(Prefix, NewName); } - if (BinOpInit *BinOp = dyn_cast(NewName)) + if (auto *BinOp = dyn_cast(NewName)) NewName = BinOp->Fold(&CurRec, CurMultiClass); return NewName; } Index: lib/TableGen/SetTheory.cpp =================================================================== --- lib/TableGen/SetTheory.cpp +++ lib/TableGen/SetTheory.cpp @@ -77,7 +77,7 @@ Expr->getAsString()); RecSet Set; ST.evaluate(Expr->arg_begin()[0], Set, Loc); - IntInit *II = dyn_cast(Expr->arg_begin()[1]); + auto *II = dyn_cast(Expr->arg_begin()[1]); if (!II) PrintFatalError(Loc, "Second argument must be an integer: " + Expr->getAsString()); @@ -172,7 +172,7 @@ PrintFatalError(Loc, "Bad args to (sequence \"Format\", From, To): " + Expr->getAsString()); else if (Expr->arg_size() == 4) { - if (IntInit *II = dyn_cast(Expr->arg_begin()[3])) { + if (auto *II = dyn_cast(Expr->arg_begin()[3])) { Step = II->getValue(); } else PrintFatalError(Loc, "Stride must be an integer: " + @@ -180,20 +180,20 @@ } std::string Format; - if (StringInit *SI = dyn_cast(Expr->arg_begin()[0])) + if (auto *SI = dyn_cast(Expr->arg_begin()[0])) Format = SI->getValue(); else PrintFatalError(Loc, "Format must be a string: " + Expr->getAsString()); int64_t From, To; - if (IntInit *II = dyn_cast(Expr->arg_begin()[1])) + if (auto *II = dyn_cast(Expr->arg_begin()[1])) From = II->getValue(); else PrintFatalError(Loc, "From must be an integer: " + Expr->getAsString()); if (From < 0 || From >= (1 << 30)) PrintFatalError(Loc, "From out of range"); - if (IntInit *II = dyn_cast(Expr->arg_begin()[2])) + if (auto *II = dyn_cast(Expr->arg_begin()[2])) To = II->getValue(); else PrintFatalError(Loc, "To must be an integer: " + Expr->getAsString()); @@ -271,7 +271,7 @@ void SetTheory::evaluate(Init *Expr, RecSet &Elts, ArrayRef Loc) { // A def in a list can be a just an element, or it may expand. - if (DefInit *Def = dyn_cast(Expr)) { + if (auto *Def = dyn_cast(Expr)) { if (const RecVec *Result = expand(Def->getDef())) return Elts.insert(Result->begin(), Result->end()); Elts.insert(Def->getDef()); @@ -279,14 +279,14 @@ } // Lists simply expand. - if (ListInit *LI = dyn_cast(Expr)) + if (auto *LI = dyn_cast(Expr)) return evaluate(LI->begin(), LI->end(), Elts, Loc); // Anything else must be a DAG. 
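Note the division of labor in the TableGen changes: dyn_cast is used where failure is an expected outcome that the code then tests for, while cast (as in cast<TypedInit>(Name) inside Record::checkName above) asserts when the operand is not of the requested type and never returns null. A sketch under the same hypothetical Shape/Circle setup from the first sketch:

#include "llvm/Support/Casting.h"
#include <cassert>

// Assumes the Shape/Circle declarations from the first sketch.
double knownCircleArea(const Shape *S) {
  assert(llvm::isa<Circle>(S) && "caller must pass a Circle");
  // cast<Circle> performs no runtime check in release builds and
  // asserts (rather than returning nullptr) in asserts builds, so it
  // belongs only where the kind is already established.
  const auto *C = llvm::cast<Circle>(S);
  return 3.14159 * C->Radius * C->Radius;
}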
Index: lib/TableGen/SetTheory.cpp
===================================================================
--- lib/TableGen/SetTheory.cpp
+++ lib/TableGen/SetTheory.cpp
@@ -77,7 +77,7 @@
                       Expr->getAsString());
     RecSet Set;
     ST.evaluate(Expr->arg_begin()[0], Set, Loc);
-    IntInit *II = dyn_cast<IntInit>(Expr->arg_begin()[1]);
+    auto *II = dyn_cast<IntInit>(Expr->arg_begin()[1]);
     if (!II)
       PrintFatalError(Loc, "Second argument must be an integer: " +
                       Expr->getAsString());
@@ -172,7 +172,7 @@
       PrintFatalError(Loc, "Bad args to (sequence \"Format\", From, To): " +
                       Expr->getAsString());
     else if (Expr->arg_size() == 4) {
-      if (IntInit *II = dyn_cast<IntInit>(Expr->arg_begin()[3])) {
+      if (auto *II = dyn_cast<IntInit>(Expr->arg_begin()[3])) {
        Step = II->getValue();
      } else
        PrintFatalError(Loc, "Stride must be an integer: " +
@@ -180,20 +180,20 @@
     }
     std::string Format;
-    if (StringInit *SI = dyn_cast<StringInit>(Expr->arg_begin()[0]))
+    if (auto *SI = dyn_cast<StringInit>(Expr->arg_begin()[0]))
       Format = SI->getValue();
     else
       PrintFatalError(Loc, "Format must be a string: " + Expr->getAsString());
     int64_t From, To;
-    if (IntInit *II = dyn_cast<IntInit>(Expr->arg_begin()[1]))
+    if (auto *II = dyn_cast<IntInit>(Expr->arg_begin()[1]))
       From = II->getValue();
     else
       PrintFatalError(Loc, "From must be an integer: " + Expr->getAsString());
     if (From < 0 || From >= (1 << 30))
       PrintFatalError(Loc, "From out of range");
-    if (IntInit *II = dyn_cast<IntInit>(Expr->arg_begin()[2]))
+    if (auto *II = dyn_cast<IntInit>(Expr->arg_begin()[2]))
       To = II->getValue();
     else
       PrintFatalError(Loc, "To must be an integer: " + Expr->getAsString());
@@ -271,7 +271,7 @@
 void SetTheory::evaluate(Init *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc) {
   // A def in a list can be a just an element, or it may expand.
-  if (DefInit *Def = dyn_cast<DefInit>(Expr)) {
+  if (auto *Def = dyn_cast<DefInit>(Expr)) {
     if (const RecVec *Result = expand(Def->getDef()))
       return Elts.insert(Result->begin(), Result->end());
     Elts.insert(Def->getDef());
@@ -279,14 +279,14 @@
   }
   // Lists simply expand.
-  if (ListInit *LI = dyn_cast<ListInit>(Expr))
+  if (auto *LI = dyn_cast<ListInit>(Expr))
     return evaluate(LI->begin(), LI->end(), Elts, Loc);
   // Anything else must be a DAG.
-  DagInit *DagExpr = dyn_cast<DagInit>(Expr);
+  auto *DagExpr = dyn_cast<DagInit>(Expr);
   if (!DagExpr)
     PrintFatalError(Loc, "Invalid set element: " + Expr->getAsString());
-  DefInit *OpInit = dyn_cast<DefInit>(DagExpr->getOperator());
+  auto *OpInit = dyn_cast<DefInit>(DagExpr->getOperator());
   if (!OpInit)
     PrintFatalError(Loc, "Bad set expression: " + Expr->getAsString());
   auto I = Operators.find(OpInit->getDef()->getName());
Index: lib/TableGen/TGParser.cpp
===================================================================
--- lib/TableGen/TGParser.cpp
+++ lib/TableGen/TGParser.cpp
@@ -100,7 +100,7 @@
   // Do not allow assignments like 'X = X'. This will just cause infinite loops
   // in the resolution machinery.
   if (BitList.empty())
-    if (VarInit *VI = dyn_cast<VarInit>(V))
+    if (auto *VI = dyn_cast<VarInit>(V))
       if (VI->getNameInit() == ValName && !AllowSelfAssignment)
         return true;
@@ -109,7 +109,7 @@
   // initializer.
   //
   if (!BitList.empty()) {
-    BitsInit *CurVal = dyn_cast<BitsInit>(RV->getValue());
+    auto *CurVal = dyn_cast<BitsInit>(RV->getValue());
     if (!CurVal)
       return Error(Loc, "Value '" + ValName->getAsUnquotedString() +
                    "' is not a bits type");
@@ -120,7 +120,7 @@
       return Error(Loc, "Initializer is not compatible with bit range");
     // We should have a BitsInit type now.
-    BitsInit *BInit = cast<BitsInit>(BI);
+    auto *BInit = cast<BitsInit>(BI);
     SmallVector<Init *, 64> NewBits(CurVal->getNumBits());
@@ -142,7 +142,7 @@
   if (RV->setValue(V)) {
     std::string InitType;
-    if (BitsInit *BI = dyn_cast<BitsInit>(V))
+    if (auto *BI = dyn_cast<BitsInit>(V))
       InitType = (Twine("' of type bit initializer with length ") +
                   Twine(BI->getNumBits())).str();
     return Error(Loc, "Value '" + ValName->getAsUnquotedString() +
@@ -308,7 +308,7 @@
   if (IterVals.size() != Loops.size()) {
     assert(IterVals.size() < Loops.size());
     ForeachLoop &CurLoop = Loops[IterVals.size()];
-    ListInit *List = dyn_cast<ListInit>(CurLoop.ListValue);
+    auto *List = dyn_cast<ListInit>(CurLoop.ListValue);
     if (!List) {
       Error(Loc, "Loop list is not a list");
       return true;
@@ -333,7 +333,7 @@
   // Set the iterator values now.
   for (IterRecord &IR : IterVals) {
     VarInit *IterVar = IR.IterVar;
-    TypedInit *IVal = dyn_cast<TypedInit>(IR.IterValue);
+    auto *IVal = dyn_cast<TypedInit>(IR.IterValue);
     if (!IVal)
       return Error(Loc, "foreach iterator value is untyped");
@@ -404,7 +404,7 @@
   RecTy *Type = nullptr;
   if (CurRec) {
-    const TypedInit *CurRecName = dyn_cast<TypedInit>(CurRec->getNameInit());
+    const auto *CurRecName = dyn_cast<TypedInit>(CurRec->getNameInit());
     if (!CurRecName) {
       TokError("Record name is not typed!");
       return nullptr;
@@ -747,7 +747,7 @@
   // If this is in a foreach loop, make sure it's not a loop iterator
   for (const auto &L : Loops) {
-    VarInit *IterVar = dyn_cast<VarInit>(L.IterVar);
+    auto *IterVar = dyn_cast<VarInit>(L.IterVar);
     if (IterVar && IterVar->getNameInit() == Name)
       return IterVar;
   }
@@ -822,16 +822,16 @@
   if (Code == UnOpInit::HEAD || Code == UnOpInit::TAIL ||
       Code == UnOpInit::EMPTY) {
-    ListInit *LHSl = dyn_cast<ListInit>(LHS);
-    StringInit *LHSs = dyn_cast<StringInit>(LHS);
-    TypedInit *LHSt = dyn_cast<TypedInit>(LHS);
+    auto *LHSl = dyn_cast<ListInit>(LHS);
+    auto *LHSs = dyn_cast<StringInit>(LHS);
+    auto *LHSt = dyn_cast<TypedInit>(LHS);
     if (!LHSl && !LHSs && !LHSt) {
       TokError("expected list or string type argument in unary operator");
       return nullptr;
     }
     if (LHSt) {
-      ListRecTy *LType = dyn_cast<ListRecTy>(LHSt->getType());
-      StringRecTy *SType = dyn_cast<StringRecTy>(LHSt->getType());
+      auto *LType = dyn_cast<ListRecTy>(LHSt->getType());
+      auto *SType = dyn_cast<StringRecTy>(LHSt->getType());
      if (!LType && !SType) {
        TokError("expected list or string type argument in unary operator");
        return nullptr;
@@ -850,7 +850,7 @@
     }
     if (LHSl) {
       Init *Item = LHSl->getElement(0);
-      TypedInit *Itemt = dyn_cast<TypedInit>(Item);
+      auto *Itemt = dyn_cast<TypedInit>(Item);
       if (!Itemt) {
         TokError("untyped list element in unary operator");
         return nullptr;
@@ -859,7 +859,7 @@
             : ListRecTy::get(Itemt->getType());
     } else {
       assert(LHSt && "expected list type argument in unary operator");
-      ListRecTy *LType = dyn_cast<ListRecTy>(LHSt->getType());
+      auto *LType = dyn_cast<ListRecTy>(LHSt->getType());
       if (!LType) {
         TokError("expected list type argument in unary operator");
         return nullptr;
@@ -940,9 +940,9 @@
     // If we are doing !listconcat, we should know the type by now
     if (OpTok == tgtok::XListConcat) {
-      if (VarInit *Arg0 = dyn_cast<VarInit>(InitList[0]))
+      if (auto *Arg0 = dyn_cast<VarInit>(InitList[0]))
         Type = Arg0->getType();
-      else if (ListInit *Arg0 = dyn_cast<ListInit>(InitList[0]))
+      else if (auto *Arg0 = dyn_cast<ListInit>(InitList[0]))
         Type = Arg0->getType();
       else {
         InitList[0]->dump();
@@ -1031,16 +1031,16 @@
     RecTy *MHSTy = nullptr;
     RecTy *RHSTy = nullptr;
-    if (TypedInit *MHSt = dyn_cast<TypedInit>(MHS))
+    if (auto *MHSt = dyn_cast<TypedInit>(MHS))
       MHSTy = MHSt->getType();
-    if (BitsInit *MHSbits = dyn_cast<BitsInit>(MHS))
+    if (auto *MHSbits = dyn_cast<BitsInit>(MHS))
       MHSTy = BitsRecTy::get(MHSbits->getNumBits());
     if (isa<BitInit>(MHS))
       MHSTy = BitRecTy::get();
-    if (TypedInit *RHSt = dyn_cast<TypedInit>(RHS))
+    if (auto *RHSt = dyn_cast<TypedInit>(RHS))
       RHSTy = RHSt->getType();
-    if (BitsInit *RHSbits = dyn_cast<BitsInit>(RHS))
+    if (auto *RHSbits = dyn_cast<BitsInit>(RHS))
       RHSTy = BitsRecTy::get(RHSbits->getNumBits());
     if (isa<BitInit>(RHS))
       RHSTy = BitRecTy::get();
@@ -1067,7 +1067,7 @@
       break;
     }
     case tgtok::XForEach: {
-      TypedInit *MHSt = dyn_cast<TypedInit>(MHS);
+      auto *MHSt = dyn_cast<TypedInit>(MHS);
       if (!MHSt) {
         TokError("could not get type for !foreach");
         return nullptr;
@@ -1076,7 +1076,7 @@
       break;
     }
     case tgtok::XSubst: {
-      TypedInit *RHSt = dyn_cast<TypedInit>(RHS);
+      auto *RHSt = dyn_cast<TypedInit>(RHS);
       if (!RHSt) {
         TokError("could not get type for !subst");
         return nullptr;
@@ -1282,14 +1282,14 @@
       // if the API was a little more orthogonal.
       // bits values are allowed to initialize n bits.
-      if (BitsInit *BI = dyn_cast<BitsInit>(Vals[i])) {
+      if (auto *BI = dyn_cast<BitsInit>(Vals[i])) {
         for (unsigned i = 0, e = BI->getNumBits(); i != e; ++i)
           NewBits.push_back(BI->getBit((e - i) - 1));
         continue;
       }
       // bits can also come from variable initializers.
-      if (VarInit *VI = dyn_cast<VarInit>(Vals[i])) {
-        if (BitsRecTy *BitsRec = dyn_cast<BitsRecTy>(VI->getType())) {
+      if (auto *VI = dyn_cast<VarInit>(Vals[i])) {
+        if (auto *BitsRec = dyn_cast<BitsRecTy>(VI->getType())) {
           for (unsigned i = 0, e = BitsRec->getNumBits(); i != e; ++i)
             NewBits.push_back(VI->getBit((e - i) - 1));
           continue;
@@ -1316,7 +1316,7 @@
     ListRecTy *GivenListTy = nullptr;
     if (ItemType) {
-      ListRecTy *ListType = dyn_cast<ListRecTy>(ItemType);
+      auto *ListType = dyn_cast<ListRecTy>(ItemType);
       if (!ListType) {
         TokError(Twine("Type mismatch for list, expected list type, got ") +
                  ItemType->getAsString());
@@ -1357,7 +1357,7 @@
     // Check elements
     RecTy *EltTy = nullptr;
     for (Init *V : Vals) {
-      TypedInit *TArg = dyn_cast<TypedInit>(V);
+      auto *TArg = dyn_cast<TypedInit>(V);
       if (!TArg) {
         TokError("Untyped list element");
         return nullptr;
@@ -1548,7 +1548,7 @@
     // Create a !strconcat() operation, first casting each operand to
     // a string if necessary.
-    TypedInit *LHS = dyn_cast<TypedInit>(Result);
+    auto *LHS = dyn_cast<TypedInit>(Result);
     if (!LHS) {
       Error(PasteLoc, "LHS of paste is not typed!");
       return nullptr;
@@ -1789,7 +1789,7 @@
     return nullptr;
   }
   RecTy *ValueType = ForeachListValue->getType();
-  ListRecTy *ListType = dyn_cast<ListRecTy>(ValueType);
+  auto *ListType = dyn_cast<ListRecTy>(ValueType);
   if (!ListType) {
     TokError("Value list is not of list type");
     return nullptr;
@@ -2353,7 +2353,7 @@
   }
   Init *DefName = DefProto->getNameInit();
-  StringInit *DefNameString = dyn_cast<StringInit>(DefName);
+  auto *DefNameString = dyn_cast<StringInit>(DefName);
   if (DefNameString) {
     // We have a fully expanded string so there are no operators to
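One stylistic point visible throughout the patch: it writes auto * and const auto * rather than bare auto, even though auto alone would deduce the same pointer type. Spelling out the pointer (and the const) keeps the declaration self-documenting. A sketch, again with the hypothetical Shape/Circle types from the first sketch:

#include "llvm/Support/Casting.h"

// Assumes the Shape/Circle declarations from the first sketch.
void pointerSpelledOut(const Shape *S) {
  auto *P = llvm::dyn_cast<Circle>(S);        // deduces const Circle *
  const auto *Q = llvm::dyn_cast<Circle>(S);  // same type, const made explicit
  (void)P;
  (void)Q;
}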
Index: lib/Target/AArch64/AArch64AddressTypePromotion.cpp
===================================================================
--- lib/Target/AArch64/AArch64AddressTypePromotion.cpp
+++ lib/Target/AArch64/AArch64AddressTypePromotion.cpp
@@ -147,14 +147,14 @@
   if (isa<SExtInst>(Inst))
     return true;
-  const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
+  const auto *BinOp = dyn_cast<BinaryOperator>(Inst);
   if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
       (BinOp->hasNoUnsignedWrap() || BinOp->hasNoSignedWrap()))
     return true;
   // sext(trunc(sext)) --> sext
   if (isa<TruncInst>(Inst) && isa<SExtInst>(Inst->getOperand(0))) {
-    const Instruction *Opnd = cast<Instruction>(Inst->getOperand(0));
+    const auto *Opnd = cast<Instruction>(Inst->getOperand(0));
     // Check that the truncate just drop sign extended bits.
     if (Inst->getType()->getIntegerBitWidth() >=
             Opnd->getOperand(0)->getType()->getIntegerBitWidth() &&
@@ -275,7 +275,7 @@
   // been moved yet.
   while (!Inst->use_empty()) {
     Use &U = *Inst->use_begin();
-    Instruction *User = dyn_cast<Instruction>(U.getUser());
+    auto *User = dyn_cast<Instruction>(U.getUser());
     assert(User && "User of sext is not an Instruction!");
     User->setOperand(U.getOperandNo(), SExt);
   }
@@ -308,7 +308,7 @@
   }
   // Check if we can statically sign extend the operand.
   Value *Opnd = Inst->getOperand(OpIdx);
-  if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
+  if (const auto *Cst = dyn_cast<ConstantInt>(Opnd)) {
     DEBUG(dbgs() << "Statically sign extend\n");
     Inst->setOperand(OpIdx, ConstantInt::getSigned(SExt->getType(),
                                                    Cst->getSExtValue()));
@@ -441,7 +441,7 @@
   Value *Last;
   do {
     int OpdIdx = 0;
-    const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
+    const auto *BinOp = dyn_cast<BinaryOperator>(Inst);
     if (BinOp && isa<ConstantInt>(BinOp->getOperand(0)))
       OpdIdx = 1;
     Last = Inst->getOperand(OpdIdx);
Index: lib/Target/AArch64/AArch64FastISel.cpp
===================================================================
--- lib/Target/AArch64/AArch64FastISel.cpp
+++ lib/Target/AArch64/AArch64FastISel.cpp
@@ -467,9 +467,9 @@
   if (const auto *CI = dyn_cast<ConstantInt>(C))
     return materializeInt(CI, VT);
-  else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
+  else if (const auto *CFP = dyn_cast<ConstantFP>(C))
     return materializeFP(CFP, VT);
-  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
+  else if (const auto *GV = dyn_cast<GlobalValue>(C))
     return materializeGV(GV);
   return 0;
@@ -509,7 +509,7 @@
 {
   const User *U = nullptr;
   unsigned Opcode = Instruction::UserOp1;
-  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
+  if (const auto *I = dyn_cast<Instruction>(Obj)) {
     // Don't walk into other basic blocks unless the object is an alloca from
     // another block, otherwise it may not have a virtual register assigned.
     if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
@@ -517,7 +517,7 @@
       Opcode = I->getOpcode();
       U = I;
     }
-  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
+  } else if (const auto *C = dyn_cast<ConstantExpr>(Obj)) {
     Opcode = C->getOpcode();
     U = C;
   }
@@ -564,14 +564,14 @@
   } else {
     uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
     for (;;) {
-      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
+      if (const auto *CI = dyn_cast<ConstantInt>(Op)) {
         // Constant-offset addressing.
         TmpOffset += CI->getSExtValue() * S;
         break;
       }
       if (canFoldAddIntoGEP(U, Op)) {
         // A compatible add with a constant operand. Fold the constant.
-        ConstantInt *CI =
+        auto *CI =
             cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
         TmpOffset += CI->getSExtValue() * S;
         // Iterate on the other operand.
@@ -596,7 +596,7 @@
       break;
     }
   case Instruction::Alloca: {
-    const AllocaInst *AI = cast<AllocaInst>(Obj);
+    const auto *AI = cast<AllocaInst>(Obj);
     DenseMap<const AllocaInst *, int>::iterator SI =
         FuncInfo.StaticAllocaMap.find(AI);
     if (SI != FuncInfo.StaticAllocaMap.end()) {
@@ -614,7 +614,7 @@
     if (isa<ConstantInt>(LHS))
       std::swap(LHS, RHS);
-    if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+    if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
       Addr.setOffset(Addr.getOffset() + CI->getSExtValue());
       return computeAddress(LHS, Addr, Ty);
     }
@@ -631,7 +631,7 @@
     const Value *LHS = U->getOperand(0);
     const Value *RHS = U->getOperand(1);
-    if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
+    if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
       Addr.setOffset(Addr.getOffset() - CI->getSExtValue());
       return computeAddress(LHS, Addr, Ty);
     }
@@ -889,7 +889,7 @@
     break;
   }
-  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+  if (const auto *GV = dyn_cast<GlobalValue>(V)) {
     Addr.setGlobalValue(GV);
     return true;
   }
@@ -1903,12 +1903,12 @@
   if (TLI.supportSwiftError()) {
     // Swifterror values can come from either a function parameter with
     // swifterror attribute or an alloca with swifterror attribute.
-    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
+    if (const auto *Arg = dyn_cast<Argument>(SV)) {
       if (Arg->hasSwiftErrorAttr())
         return false;
     }
-    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
+    if (const auto *Alloca = dyn_cast<AllocaInst>(SV)) {
       if (Alloca->isSwiftError())
         return false;
     }
@@ -2103,12 +2103,12 @@
   if (TLI.supportSwiftError()) {
     // Swifterror values can come from either a function parameter with
     // swifterror attribute or an alloca with swifterror attribute.
-    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
+    if (const auto *Arg = dyn_cast<Argument>(PtrV)) {
       if (Arg->hasSwiftErrorAttr())
         return false;
     }
-    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
+    if (const auto *Alloca = dyn_cast<AllocaInst>(PtrV)) {
       if (Alloca->isSwiftError())
         return false;
     }
@@ -2206,7 +2206,7 @@
 /// \brief Try to emit a combined compare-and-branch instruction.
 bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {
   assert(isa<CmpInst>(BI->getCondition()) && "Expected cmp instruction");
-  const CmpInst *CI = cast<CmpInst>(BI->getCondition());
+  const auto *CI = cast<CmpInst>(BI->getCondition());
   CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
   const Value *LHS = CI->getOperand(0);
@@ -2325,7 +2325,7 @@
 }
 bool AArch64FastISel::selectBranch(const Instruction *I) {
-  const BranchInst *BI = cast<BranchInst>(I);
+  const auto *BI = cast<BranchInst>(I);
   if (BI->isUnconditional()) {
     MachineBasicBlock *MSucc = FuncInfo.MBBMap[BI->getSuccessor(0)];
     fastEmitBranch(MSucc, BI->getDebugLoc());
@@ -2335,7 +2335,7 @@
   MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
   MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
-  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
+  if (const auto *CI = dyn_cast<CmpInst>(BI->getCondition())) {
     if (CI->hasOneUse() && isValueAvailable(CI)) {
       // Try to optimize or fold the cmp.
       CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
@@ -2455,7 +2455,7 @@
 }
 bool AArch64FastISel::selectIndirectBr(const Instruction *I) {
-  const IndirectBrInst *BI = cast<IndirectBrInst>(I);
+  const auto *BI = cast<IndirectBrInst>(I);
   unsigned AddrReg = getRegForValue(BI->getOperand(0));
   if (AddrReg == 0)
     return false;
@@ -2473,7 +2473,7 @@
 }
 bool AArch64FastISel::selectCmp(const Instruction *I) {
-  const CmpInst *CI = cast<CmpInst>(I);
+  const auto *CI = cast<CmpInst>(I);
   // Vectors of i1 are weird: bail out.
   if (CI->getType()->isVectorTy())
@@ -2645,7 +2645,7 @@
     break;
   }
-  const SelectInst *SI = cast<SelectInst>(I);
+  const auto *SI = cast<SelectInst>(I);
   const Value *Cond = SI->getCondition();
   AArch64CC::CondCode CC = AArch64CC::NE;
   AArch64CC::CondCode ExtraCC = AArch64CC::AL;
@@ -3429,7 +3429,7 @@
     return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2);
   }
   case Intrinsic::memset: {
-    const MemSetInst *MSI = cast<MemSetInst>(II);
+    const auto *MSI = cast<MemSetInst>(II);
     // Don't handle volatile.
     if (MSI->isVolatile())
       return false;
@@ -3700,7 +3700,7 @@
 }
 bool AArch64FastISel::selectRet(const Instruction *I) {
-  const ReturnInst *Ret = cast<ReturnInst>(I);
+  const auto *Ret = cast<ReturnInst>(I);
   const Function &F = *I->getParent()->getParent();
   if (!FuncInfo.CanLowerReturn)
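The DAG-selection code that follows often needs only a yes/no answer rather than a bound pointer, which is where isa<> fits: an isa<> test guards a later cast<>, as in the selectBranch assert above. Sketch with the same hypothetical types from the first sketch:

#include "llvm/Support/Casting.h"

// Assumes the Shape/Circle declarations from the first sketch.
bool isUnitCircle(const Shape *S) {
  // isa<> when no variable is needed; follow up with cast<> (not a
  // second dyn_cast) once the kind has been established.
  return llvm::isa<Circle>(S) && llvm::cast<Circle>(S)->Radius == 1.0;
}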
Index: lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
===================================================================
--- lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -208,7 +208,7 @@
 /// isIntImmediate - This method tests to see if the node is a constant
 /// operand. If so Imm will receive the 32-bit value.
 static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {
-  if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
+  if (const auto *C = dyn_cast<ConstantSDNode>(N)) {
     Imm = C->getZExtValue();
     return true;
   }
@@ -348,7 +348,7 @@
   if (!AllowROR && ShType == AArch64_AM::ROR)
     return false;
-  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
+  if (auto *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
     unsigned BitSize = N.getValueSizeInBits();
     unsigned Val = RHS->getZExtValue() & (BitSize - 1);
     unsigned ShVal = AArch64_AM::getShifterImm(ShType, Val);
@@ -395,7 +395,7 @@
     return AArch64_AM::InvalidShiftExtend;
   } else if (N.getOpcode() == ISD::AND) {
-    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
+    auto *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
     if (!CSD)
       return AArch64_AM::InvalidShiftExtend;
     uint64_t AndMask = CSD->getZExtValue();
@@ -429,8 +429,8 @@
   if (EV.getOpcode() != ISD::EXTRACT_SUBVECTOR)
     return false;
-  ConstantSDNode *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
-  ConstantSDNode *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
+  auto *DLidx = cast<ConstantSDNode>(DL->getOperand(1).getNode());
+  auto *EVidx = cast<ConstantSDNode>(EV.getOperand(1).getNode());
   LaneIdx = DLidx->getSExtValue() + EVidx->getSExtValue();
   LaneOp = EV.getOperand(0);
@@ -568,7 +568,7 @@
   AArch64_AM::ShiftExtendType Ext;
   if (N.getOpcode() == ISD::SHL) {
-    ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
+    auto *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
     if (!CSD)
       return false;
     ShiftVal = CSD->getZExtValue();
@@ -646,7 +646,7 @@
   // selected here doesn't support labels/immediates, only base+offset.
   if (CurDAG->isBaseWithConstantOffset(N)) {
-    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
+    if (auto *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
       int64_t RHSC = RHS->getSExtValue();
       unsigned Scale = Log2_32(Size);
       if ((RHSC & (Size - 1)) == 0 && RHSC >= -(0x40 << Scale) &&
@@ -687,7 +687,7 @@
   }
   if (N.getOpcode() == AArch64ISD::ADDlow && isWorthFoldingADDlow(N)) {
-    GlobalAddressSDNode *GAN =
+    auto *GAN =
         dyn_cast<GlobalAddressSDNode>(N.getOperand(1).getNode());
     Base = N.getOperand(0);
     OffImm = N.getOperand(1);
@@ -705,7 +705,7 @@
   }
   if (CurDAG->isBaseWithConstantOffset(N)) {
-    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
+    if (auto *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
       int64_t RHSC = (int64_t)RHS->getZExtValue();
       unsigned Scale = Log2_32(Size);
       if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
@@ -744,7 +744,7 @@
                                             SDValue &OffImm) {
   if (!CurDAG->isBaseWithConstantOffset(N))
     return false;
-  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
+  if (auto *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
     int64_t RHSC = RHS->getSExtValue();
     // If the offset is valid as a scaled immediate, don't match here.
     if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 &&
@@ -781,7 +781,7 @@
                                             bool WantExtend, SDValue &Offset,
                                             SDValue &SignExtend) {
   assert(N.getOpcode() == ISD::SHL && "Invalid opcode.");
-  ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
+  auto *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
   if (!CSD || (CSD->getZExtValue() & 0x7) != CSD->getZExtValue())
     return false;
@@ -1043,7 +1043,7 @@
 }
 bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {
-  LoadSDNode *LD = cast<LoadSDNode>(N);
+  auto *LD = cast<LoadSDNode>(N);
   if (LD->isUnindexed())
     return false;
   EVT VT = LD->getMemoryVT();
@@ -1110,7 +1110,7 @@
     return false;
   SDValue Chain = LD->getChain();
   SDValue Base = LD->getBasePtr();
-  ConstantSDNode *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
+  auto *OffsetOp = cast<ConstantSDNode>(LD->getOffset());
   int OffsetVal = (int)OffsetOp->getZExtValue();
   SDLoc dl(N);
   SDValue Offset = CurDAG->getTargetConstant(OffsetVal, dl, MVT::i64);
@@ -2392,15 +2392,15 @@
 AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                               unsigned RegWidth) {
   APFloat FVal(0.0);
-  if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N))
+  if (auto *CN = dyn_cast<ConstantFPSDNode>(N))
     FVal = CN->getValueAPF();
-  else if (LoadSDNode *LN = dyn_cast<LoadSDNode>(N)) {
+  else if (auto *LN = dyn_cast<LoadSDNode>(N)) {
     // Some otherwise illegal constants are allowed in this case.
     if (LN->getOperand(1).getOpcode() != AArch64ISD::ADDlow ||
        !isa<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1)))
      return false;
-    ConstantPoolSDNode *CN =
+    auto *CN =
         dyn_cast<ConstantPoolSDNode>(LN->getOperand(1)->getOperand(1));
     FVal = cast<ConstantFP>(CN->getConstVal())->getValueAPF();
   } else
@@ -2468,8 +2468,8 @@
 // form described in getIntOperandsFromRegsterString) or is a named register
 // known by the MRS SysReg mapper.
 bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {
-  const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
-  const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
+  const auto *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
+  const auto *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
   SDLoc DL(N);
   int Reg = getIntOperandFromRegisterString(RegString->getString());
@@ -2506,8 +2506,8 @@
 // form described in getIntOperandsFromRegsterString) or is a named register
 // known by the MSR SysReg mapper.
 bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {
-  const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
-  const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
+  const auto *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
+  const auto *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
   SDLoc DL(N);
   int Reg = getIntOperandFromRegisterString(RegString->getString());
@@ -2670,7 +2670,7 @@
     // the rest of the compiler, especially the register allocator and copy
     // propagation, to reason about, so is preferred when it's possible to
     // use it.
-    ConstantSDNode *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
+    auto *LaneNode = cast<ConstantSDNode>(Node->getOperand(1));
     // Bail and use the default Select() for non-zero lanes.
     if (LaneNode->getZExtValue() != 0)
       break;
@@ -2710,7 +2710,7 @@
   case ISD::Constant: {
     // Materialize zero constants as copies from WZR/XZR. This allows
     // the coalescer to propagate these into other instructions.
-    ConstantSDNode *ConstNode = cast<ConstantSDNode>(Node);
+    auto *ConstNode = cast<ConstantSDNode>(Node);
     if (ConstNode->isNullValue()) {
       if (VT == MVT::i32) {
         SDValue New = CurDAG->getCopyFromReg(
Index: lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.cpp
+++ lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -755,7 +755,7 @@
     break;
   }
   case ISD::INTRINSIC_W_CHAIN: {
-    ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
+    auto *CN = cast<ConstantSDNode>(Op->getOperand(1));
     Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
     switch (IntID) {
     default: return;
@@ -1514,7 +1514,7 @@
 static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                              SDValue &AArch64cc, SelectionDAG &DAG,
                              const SDLoc &dl) {
-  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
+  if (auto *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
     EVT VT = RHS.getValueType();
     uint64_t C = RHSC->getZExtValue();
     if (!isLegalArithImmed(C)) {
@@ -1571,7 +1571,7 @@
   SDValue Cmp;
   AArch64CC::CondCode AArch64CC;
   if ((CC == ISD::SETEQ || CC == ISD::SETNE) && isa<ConstantSDNode>(RHS)) {
-    const ConstantSDNode *RHSC = cast<ConstantSDNode>(RHS);
+    const auto *RHSC = cast<ConstantSDNode>(RHS);
     // The imm operand of ADDS is an unsigned immediate, in the range 0 to 4095.
     // For the i8 operand, the largest immediate is 255, so this can be easily
@@ -1770,8 +1770,8 @@
   if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64)
     return Op;
-  ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
-  ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
+  auto *CFVal = dyn_cast<ConstantSDNode>(FVal);
+  auto *CTVal = dyn_cast<ConstantSDNode>(TVal);
   // The values aren't constants, this isn't the pattern we're looking for.
   if (!CFVal || !CTVal)
@@ -2137,7 +2137,7 @@
     return false;
   for (const SDValue &Elt : N->op_values()) {
-    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
+    if (auto *C = dyn_cast<ConstantSDNode>(Elt)) {
       unsigned EltSize = VT.getScalarSizeInBits();
       unsigned HalfSize = EltSize / 2;
       if (isSigned) {
@@ -2170,7 +2170,7 @@
   MVT TruncVT = MVT::getIntegerVT(EltSize);
   SmallVector<SDValue, 8> Ops;
   for (unsigned i = 0; i != NumElts; ++i) {
-    ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
+    auto *C = cast<ConstantSDNode>(N->getOperand(i));
     const APInt &CInt = C->getAPIntValue();
     // Element types smaller than 32 bits are not legal, so use i32 elements.
     // The values are implicitly truncated so sext vs. zext doesn't matter.
@@ -2821,7 +2821,7 @@
   // next instruction. The behaviour of branch instructions in this
   // situation (as used for tail calls) is implementation-defined, so we
   // cannot rely on the linker replacing the tail call with a return.
-  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
     const GlobalValue *GV = G->getGlobal();
     const Triple &TT = getTargetMachine().getTargetTriple();
     if (GV->hasExternalWeakLinkage() &&
@@ -2909,8 +2909,8 @@
   for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
                             UE = DAG.getEntryNode().getNode()->use_end();
        U != UE; ++U)
-    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
-      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
+    if (auto *L = dyn_cast<LoadSDNode>(*U))
+      if (auto *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
        if (FI->getIndex() < 0) {
          int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
          int64_t InLastByte = InFirstByte;
@@ -3195,7 +3195,7 @@
   // node so that legalize doesn't hack it.
   if (getTargetMachine().getCodeModel() == CodeModel::Large &&
       Subtarget->isTargetMachO()) {
-    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+    if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
       const GlobalValue *GV = G->getGlobal();
       bool InternalLinkage = GV->hasInternalLinkage();
       if (InternalLinkage)
@@ -3205,16 +3205,16 @@
             DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_GOT);
         Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
       }
-    } else if (ExternalSymbolSDNode *S =
+    } else if (auto *S =
                    dyn_cast<ExternalSymbolSDNode>(Callee)) {
       const char *Sym = S->getSymbol();
       Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, AArch64II::MO_GOT);
       Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
     }
-  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+  } else if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
     const GlobalValue *GV = G->getGlobal();
     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0);
-  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+  } else if (auto *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
     const char *Sym = S->getSymbol();
     Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0);
   }
@@ -3380,7 +3380,7 @@
                                                   SelectionDAG &DAG) const {
   EVT PtrVT = getPointerTy(DAG.getDataLayout());
   SDLoc DL(Op);
-  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
+  const auto *GN = cast<GlobalAddressSDNode>(Op);
   const GlobalValue *GV = GN->getGlobal();
   unsigned char OpFlags =
       Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
@@ -3533,7 +3533,7 @@
   // maximum TLS size is 4GiB.
   // FIXME: add -mtls-size command line option and make it control the 16MiB
   // vs. 4GiB code sequence generation.
-  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
+  const auto *GA = cast<GlobalAddressSDNode>(Op);
   TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());
@@ -3685,7 +3685,7 @@
     // If the RHS of the comparison is zero, we can potentially fold this
     // to a specialized branch.
-    const ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
+    const auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
     if (RHSC && RHSC->getZExtValue() == 0) {
       if (CC == ISD::SETEQ) {
         // See if we can use a TBZ to fold in an AND as well.
@@ -3976,8 +3976,8 @@
   // If both the TVal and the FVal are constants, see if we can swap them in
   // order to for a CSINV or CSINC out of them.
-  ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
-  ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);
+  auto *CFVal = dyn_cast<ConstantSDNode>(FVal);
+  auto *CTVal = dyn_cast<ConstantSDNode>(TVal);
   if (CTVal && CFVal && CTVal->isAllOnesValue() && CFVal->isNullValue()) {
     std::swap(TVal, FVal);
@@ -4059,7 +4059,7 @@
     // is one, zero or negative one in the case of a CSEL. We can always
     // materialize these values using CSINC, CSEL and CSINV with wzr/xzr as the
     // FVal, respectively.
-    ConstantSDNode *RHSVal = dyn_cast<ConstantSDNode>(RHS);
+    auto *RHSVal = dyn_cast<ConstantSDNode>(RHS);
     if (Opcode == AArch64ISD::CSEL && RHSVal && !RHSVal->isOne() &&
         !RHSVal->isNullValue() && !RHSVal->isAllOnesValue()) {
       AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
@@ -4102,10 +4102,10 @@
   if (DAG.getTarget().Options.UnsafeFPMath) {
     // Transform "a == 0.0 ? 0.0 : x" to "a == 0.0 ? a : x" and
     // "a != 0.0 ? x : 0.0" to "a != 0.0 ? x : a" to avoid materializing 0.0.
-    ConstantFPSDNode *RHSVal = dyn_cast<ConstantFPSDNode>(RHS);
+    auto *RHSVal = dyn_cast<ConstantFPSDNode>(RHS);
     if (RHSVal && RHSVal->isZero()) {
-      ConstantFPSDNode *CFVal = dyn_cast<ConstantFPSDNode>(FVal);
-      ConstantFPSDNode *CTVal = dyn_cast<ConstantFPSDNode>(TVal);
+      auto *CFVal = dyn_cast<ConstantFPSDNode>(FVal);
+      auto *CTVal = dyn_cast<ConstantFPSDNode>(TVal);
       if ((CC == ISD::SETEQ || CC == ISD::SETOEQ || CC == ISD::SETUEQ) &&
           CTVal && CTVal->isZero() && TVal.getValueType() == LHS.getValueType())
@@ -4188,7 +4188,7 @@
                                               SelectionDAG &DAG) const {
   // Jump table entries as PC relative offsets. No additional tweaking
   // is necessary here. Just get the address of the jump table.
-  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
+  auto *JT = cast<JumpTableSDNode>(Op);
   EVT PtrVT = getPointerTy(DAG.getDataLayout());
   SDLoc DL(Op);
@@ -4214,7 +4214,7 @@
 SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op,
                                                  SelectionDAG &DAG) const {
-  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
+  auto *CP = cast<ConstantPoolSDNode>(Op);
   EVT PtrVT = getPointerTy(DAG.getDataLayout());
   SDLoc DL(Op);
@@ -4913,7 +4913,7 @@
     case 'L':
     case 'M':
     case 'N':
-      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
+      auto *C = dyn_cast<ConstantSDNode>(Op);
       if (!C)
         return;
@@ -5707,7 +5707,7 @@
   SDLoc dl(Op);
   EVT VT = Op.getValueType();
-  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
+  auto *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
   // Convert shuffles that are directly supported on NEON to target-specific
   // DAG nodes, instead of keeping them as shuffles and matching them again
@@ -5878,7 +5878,7 @@
 SDValue AArch64TargetLowering::LowerVectorAND(SDValue Op,
                                               SelectionDAG &DAG) const {
-  BuildVectorSDNode *BVN =
+  auto *BVN =
       dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode());
   SDValue LHS = Op.getOperand(0);
   SDLoc dl(Op);
@@ -5974,10 +5974,10 @@
 // ConstVal
 static bool isAllConstantBuildVector(const SDValue &PotentialBVec,
                                      uint64_t &ConstVal) {
-  BuildVectorSDNode *Bvec = dyn_cast<BuildVectorSDNode>(PotentialBVec);
+  auto *Bvec = dyn_cast<BuildVectorSDNode>(PotentialBVec);
   if (!Bvec)
     return false;
-  ConstantSDNode *FirstElt = dyn_cast<ConstantSDNode>(Bvec->getOperand(0));
+  auto *FirstElt = dyn_cast<ConstantSDNode>(Bvec->getOperand(0));
   if (!FirstElt)
     return false;
   EVT VT = Bvec->getValueType(0);
@@ -6030,7 +6030,7 @@
   bool IsShiftRight = ShiftOpc == AArch64ISD::VLSHR;
   // Is the shift amount constant?
-  ConstantSDNode *C2node = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
+  auto *C2node = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
   if (!C2node)
     return SDValue();
@@ -6076,7 +6076,7 @@
     return Res;
   }
-  BuildVectorSDNode *BVN =
+  auto *BVN =
      dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode());
   SDValue LHS = Op.getOperand(1);
   SDLoc dl(Op);
@@ -6198,7 +6198,7 @@
   SDLoc dl(Op);
   EVT VT = Op.getValueType();
   Op = NormalizeBuildVector(Op, DAG);
-  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
+  auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
   APInt CnstBits(VT.getSizeInBits(), 0);
   APInt UndefBits(VT.getSizeInBits(), 0);
@@ -6575,7 +6575,7 @@
   // Check for non-constant or out of range lane.
   EVT VT = Op.getOperand(0).getValueType();
-  ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(2));
+  auto *CI = dyn_cast<ConstantSDNode>(Op.getOperand(2));
   if (!CI || CI->getZExtValue() >= VT.getVectorNumElements())
     return SDValue();
@@ -6609,7 +6609,7 @@
   // Check for non-constant or out of range lane.
   EVT VT = Op.getOperand(0).getValueType();
-  ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+  auto *CI = dyn_cast<ConstantSDNode>(Op.getOperand(1));
   if (!CI || CI->getZExtValue() >= VT.getVectorNumElements())
     return SDValue();
@@ -6647,7 +6647,7 @@
   if (!VT.isVector())
     return SDValue();
-  ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+  auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1));
   if (!Cst)
     return SDValue();
   unsigned Val = Cst->getZExtValue();
@@ -6712,7 +6712,7 @@
   // Ignore bit_converts.
while (Op.getOpcode() == ISD::BITCAST) Op = Op.getOperand(0); - BuildVectorSDNode *BVN = dyn_cast(Op.getNode()); + auto *BVN = dyn_cast(Op.getNode()); APInt SplatBits, SplatUndef; unsigned SplatBitSize; bool HasAnyUndefs; @@ -6803,7 +6803,7 @@ assert(VT.getSizeInBits() == SrcVT.getSizeInBits() && "function only supposed to emit natural comparisons"); - BuildVectorSDNode *BVN = dyn_cast(RHS.getNode()); + auto *BVN = dyn_cast(RHS.getNode()); APInt CnstBits(VT.getSizeInBits(), 0); APInt UndefBits(VT.getSizeInBits(), 0); bool IsCnst = BVN && resolveBuildVector(BVN, CnstBits, UndefBits); @@ -7003,7 +7003,7 @@ } case Intrinsic::aarch64_ldaxr: case Intrinsic::aarch64_ldxr: { - PointerType *PtrTy = cast(I.getArgOperand(0)->getType()); + auto *PtrTy = cast(I.getArgOperand(0)->getType()); Info.opc = ISD::INTRINSIC_W_CHAIN; Info.memVT = MVT::getVT(PtrTy->getElementType()); Info.ptrVal = I.getArgOperand(0); @@ -7016,7 +7016,7 @@ } case Intrinsic::aarch64_stlxr: case Intrinsic::aarch64_stxr: { - PointerType *PtrTy = cast(I.getArgOperand(1)->getType()); + auto *PtrTy = cast(I.getArgOperand(1)->getType()); Info.opc = ISD::INTRINSIC_W_CHAIN; Info.memVT = MVT::getVT(PtrTy->getElementType()); Info.ptrVal = I.getArgOperand(1); @@ -7146,7 +7146,7 @@ // addressing mode or an arithmetic operation: add, sub, and cmp. // Is there a shift? - const Instruction *Instr = cast(U.getUser()); + const auto *Instr = cast(U.getUser()); // Is this a constant shift? switch (Instr->getOpcode()) { @@ -7579,7 +7579,7 @@ if (VT.isInteger() && N->getOpcode() == ISD::XOR && N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1 && N1.getOpcode() == ISD::SRA && N1.getOperand(0) == N0.getOperand(0)) - if (ConstantSDNode *Y1C = dyn_cast(N1.getOperand(1))) + if (auto *Y1C = dyn_cast(N1.getOperand(1))) if (Y1C->getAPIntValue() == VT.getSizeInBits() - 1) { SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), N0.getOperand(0)); @@ -7662,7 +7662,7 @@ if (!isa(N->getOperand(1))) return SDValue(); - ConstantSDNode *C = cast(N->getOperand(1)); + auto *C = cast(N->getOperand(1)); const APInt &ConstValue = C->getAPIntValue(); // Multiplication of a power of two plus/minus one can be done more @@ -7775,7 +7775,7 @@ // make the transformation for non-constant splats as well, but it's unclear // that would be a benefit as it would not eliminate any operations, just // perform one more step in scalar code before moving to the vector unit. - if (BuildVectorSDNode *BV = + if (auto *BV = dyn_cast(N->getOperand(0)->getOperand(1))) { // Bail out if the vector isn't a constant. if (!BV->isConstant()) @@ -7820,7 +7820,7 @@ if (Subtarget->hasNEON() && ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() && // Do not change the width of a volatile load. !cast(N0)->isVolatile()) { - LoadSDNode *LN0 = cast(N0); + auto *LN0 = cast(N0); SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(), LN0->getPointerInfo(), LN0->getAlignment(), LN0->getMemOperand()->getFlags()); @@ -7869,7 +7869,7 @@ return SDValue(); BitVector UndefElements; - BuildVectorSDNode *BV = cast(ConstVec); + auto *BV = cast(ConstVec); int32_t Bits = IntBits == 64 ? 
64 : 32; int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, Bits + 1); if (C == -1 || C == 0 || C > Bits) @@ -7943,7 +7943,7 @@ return SDValue(); BitVector UndefElements; - BuildVectorSDNode *BV = cast(ConstVec); + auto *BV = cast(ConstVec); int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, FloatBits + 1); if (C == -1 || C == 0 || C > FloatBits) return SDValue(); @@ -8065,15 +8065,15 @@ uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1); for (int i = 1; i >= 0; --i) for (int j = 1; j >= 0; --j) { - BuildVectorSDNode *BVN0 = dyn_cast(N0->getOperand(i)); - BuildVectorSDNode *BVN1 = dyn_cast(N1->getOperand(j)); + auto *BVN0 = dyn_cast(N0->getOperand(i)); + auto *BVN1 = dyn_cast(N1->getOperand(j)); if (!BVN0 || !BVN1) continue; bool FoundMatch = true; for (unsigned k = 0; k < VT.getVectorNumElements(); ++k) { - ConstantSDNode *CN0 = dyn_cast(BVN0->getOperand(k)); - ConstantSDNode *CN1 = dyn_cast(BVN1->getOperand(k)); + auto *CN0 = dyn_cast(BVN0->getOperand(k)); + auto *CN1 = dyn_cast(BVN1->getOperand(k)); if (!CN0 || !CN1 || CN0->getZExtValue() != (BitMask & ~CN1->getZExtValue())) { FoundMatch = false; @@ -8122,7 +8122,7 @@ SDLoc DL(N); SDValue N1 = N->getOperand(1); SDValue N00 = N0.getOperand(0); - if (ConstantSDNode *C = dyn_cast(N1)) { + if (auto *C = dyn_cast(N1)) { uint64_t ShiftAmt = C->getZExtValue(); if (VT == MVT::i32 && ShiftAmt == 16 && DAG.MaskedValueIsZero(N00, APInt::getHighBitsSet(32, 16))) @@ -8445,8 +8445,8 @@ // Check that the operands matches the constraints: // (1) Both operands must be constants. // (2) One must be 1 and the other must be 0. - ConstantSDNode *TValue = dyn_cast(Op.getOperand(0)); - ConstantSDNode *FValue = dyn_cast(Op.getOperand(1)); + auto *TValue = dyn_cast(Op.getOperand(0)); + auto *FValue = dyn_cast(Op.getOperand(1)); // Check (1). if (!TValue || !FValue) @@ -8609,7 +8609,7 @@ unsigned ElemBits = ElemTy.getSizeInBits(); int64_t ShiftAmount; - if (BuildVectorSDNode *BVN = dyn_cast(N->getOperand(2))) { + if (auto *BVN = dyn_cast(N->getOperand(2))) { APInt SplatValue, SplatUndef; unsigned SplatBitSize; bool HasAnyUndefs; @@ -8619,7 +8619,7 @@ return SDValue(); ShiftAmount = SplatValue.getSExtValue(); - } else if (ConstantSDNode *CVN = dyn_cast(N->getOperand(2))) { + } else if (auto *CVN = dyn_cast(N->getOperand(2))) { ShiftAmount = CVN->getSExtValue(); } else return SDValue(); @@ -8672,7 +8672,7 @@ if (AndN.getOpcode() != ISD::AND) return SDValue(); - ConstantSDNode *CMask = dyn_cast(AndN.getOperand(1)); + auto *CMask = dyn_cast(AndN.getOperand(1)); if (!CMask || CMask->getZExtValue() != Mask) return SDValue(); @@ -8967,7 +8967,7 @@ return SDValue(); // Check insert element index. - ConstantSDNode *CIndex = dyn_cast(StVal.getOperand(2)); + auto *CIndex = dyn_cast(StVal.getOperand(2)); if (!CIndex) return SDValue(); uint64_t IndexVal = CIndex->getZExtValue(); @@ -8990,7 +8990,7 @@ if (!DCI.isBeforeLegalize()) return SDValue(); - StoreSDNode *S = cast(N); + auto *S = cast(N); if (S->isVolatile()) return SDValue(); @@ -9073,7 +9073,7 @@ if (LD->getOpcode() != ISD::LOAD) return SDValue(); - LoadSDNode *LoadSDN = cast(LD); + auto *LoadSDN = cast(LD); EVT MemVT = LoadSDN->getMemoryVT(); // Check if memory operand is the same type as the vector element. if (MemVT != VT.getVectorElementType()) @@ -9110,7 +9110,7 @@ // If the increment is a constant, it must match the memory ref size. SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 
1 : 0); - if (ConstantSDNode *CInc = dyn_cast(Inc.getNode())) { + if (auto *CInc = dyn_cast(Inc.getNode())) { uint32_t IncVal = CInc->getZExtValue(); unsigned NumBytes = VT.getScalarSizeInBits() / 8; if (IncVal != NumBytes) @@ -9532,7 +9532,7 @@ // If the increment is a constant, it must match the memory ref size. SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); - if (ConstantSDNode *CInc = dyn_cast(Inc.getNode())) { + if (auto *CInc = dyn_cast(Inc.getNode())) { uint32_t IncVal = CInc->getZExtValue(); unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; if (IsLaneOp || IsDupOp) @@ -9560,7 +9560,7 @@ Tys[n] = MVT::Other; // Type of the chain SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2)); - MemIntrinsicSDNode *MemInt = cast(N); + auto *MemInt = cast(N); SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, Ops, MemInt->getMemoryVT(), MemInt->getMemOperand()); @@ -9588,7 +9588,7 @@ default: return false; case ISD::LOAD: { - LoadSDNode *LoadNode = cast(V.getNode()); + auto *LoadNode = cast(V.getNode()); if ((LoadNode->getMemoryVT() == MVT::i8 && width == 8) || (LoadNode->getMemoryVT() == MVT::i16 && width == 16)) { ExtType = LoadNode->getExtensionType(); @@ -9597,7 +9597,7 @@ return false; } case ISD::AssertSext: { - VTSDNode *TypeNode = cast(V.getNode()->getOperand(1)); + auto *TypeNode = cast(V.getNode()->getOperand(1)); if ((TypeNode->getVT() == MVT::i8 && width == 8) || (TypeNode->getVT() == MVT::i16 && width == 16)) { ExtType = ISD::SEXTLOAD; @@ -9606,7 +9606,7 @@ return false; } case ISD::AssertZext: { - VTSDNode *TypeNode = cast(V.getNode()->getOperand(1)); + auto *TypeNode = cast(V.getNode()->getOperand(1)); if ((TypeNode->getVT() == MVT::i8 && width == 8) || (TypeNode->getVT() == MVT::i16 && width == 16)) { ExtType = ISD::ZEXTLOAD; @@ -9782,7 +9782,7 @@ if (AndNode->getOpcode() != ISD::AND) return SDValue(); - if (ConstantSDNode *CN = dyn_cast(AndNode->getOperand(1))) { + if (auto *CN = dyn_cast(AndNode->getOperand(1))) { uint32_t CNV = CN->getZExtValue(); if (CNV == 255) MaskBits = 8; @@ -10242,7 +10242,7 @@ Base = Op->getOperand(0); // All of the indexed addressing mode instructions take a signed // 9 bit immediate offset. 
- if (ConstantSDNode *RHS = dyn_cast(Op->getOperand(1))) { + if (auto *RHS = dyn_cast(Op->getOperand(1))) { int64_t RHSC = (int64_t)RHS->getZExtValue(); if (RHSC >= 256 || RHSC <= -256) return false; @@ -10259,10 +10259,10 @@ SelectionDAG &DAG) const { EVT VT; SDValue Ptr; - if (LoadSDNode *LD = dyn_cast(N)) { + if (auto *LD = dyn_cast(N)) { VT = LD->getMemoryVT(); Ptr = LD->getBasePtr(); - } else if (StoreSDNode *ST = dyn_cast(N)) { + } else if (auto *ST = dyn_cast(N)) { VT = ST->getMemoryVT(); Ptr = ST->getBasePtr(); } else @@ -10280,10 +10280,10 @@ ISD::MemIndexedMode &AM, SelectionDAG &DAG) const { EVT VT; SDValue Ptr; - if (LoadSDNode *LD = dyn_cast(N)) { + if (auto *LD = dyn_cast(N)) { VT = LD->getMemoryVT(); Ptr = LD->getBasePtr(); - } else if (StoreSDNode *ST = dyn_cast(N)) { + } else if (auto *ST = dyn_cast(N)) { VT = ST->getMemoryVT(); Ptr = ST->getBasePtr(); } else Index: lib/Target/AArch64/AArch64InstrInfo.cpp =================================================================== --- lib/Target/AArch64/AArch64InstrInfo.cpp +++ lib/Target/AArch64/AArch64InstrInfo.cpp @@ -1259,7 +1259,7 @@ MachineBasicBlock &MBB = *MI.getParent(); DebugLoc DL = MI.getDebugLoc(); unsigned Reg = MI.getOperand(0).getReg(); - const GlobalValue *GV = + const auto *GV = cast((*MI.memoperands_begin())->getValue()); const TargetMachine &TM = MBB.getParent()->getTarget(); unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM); Index: lib/Target/AArch64/AArch64PromoteConstant.cpp =================================================================== --- lib/Target/AArch64/AArch64PromoteConstant.cpp +++ lib/Target/AArch64/AArch64PromoteConstant.cpp @@ -293,7 +293,7 @@ return false; // Do not mess with inline asm. - const CallInst *CI = dyn_cast(Instr); + const auto *CI = dyn_cast(Instr); return !(CI && isa(CI->getCalledValue())); } @@ -348,7 +348,7 @@ unsigned OpNo) { // If this user is a phi, the insertion point is in the related // incoming basic block. - if (PHINode *PhiInst = dyn_cast(&User)) + if (auto *PhiInst = dyn_cast(&User)) return PhiInst->getIncomingBlock(OpNo)->getTerminator(); return &User; @@ -538,7 +538,7 @@ // Traverse the operand, looking for constant vectors. Replace them by a // load of a global variable of constant vector type. for (Use &U : I.operands()) { - Constant *Cst = dyn_cast(U); + auto *Cst = dyn_cast(U); // There is no point in promoting global values as they are already // global. Do not promote constant expressions either, as they may // require some code expansion. Index: lib/Target/AArch64/AArch64SelectionDAGInfo.cpp =================================================================== --- lib/Target/AArch64/AArch64SelectionDAGInfo.cpp +++ lib/Target/AArch64/AArch64SelectionDAGInfo.cpp @@ -21,8 +21,8 @@ SDValue Size, unsigned Align, bool isVolatile, MachinePointerInfo DstPtrInfo) const { // Check to see if there is a specialized entry-point for memory zeroing. 
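// ---------------------------------------------------------------------------
// A minimal sketch of the idiom every hunk above applies (illustration only,
// not part of the patch): when the right-hand side already spells the target
// type inside cast<>/dyn_cast<>, LLVM style prefers `auto *` on the left.
// `Node` and `ConstNode` are hypothetical stand-ins for the SDNode classes
// used in this file; only llvm/Support/Casting.h is assumed.
#include "llvm/Support/Casting.h"
#include <cstdint>

struct Node {
  enum NodeKind { NK_Const, NK_Other };
  const NodeKind Kind;
  explicit Node(NodeKind K) : Kind(K) {}
};

struct ConstNode : Node {
  int64_t Value;
  explicit ConstNode(int64_t V) : Node(NK_Const), Value(V) {}
  // classof is what lets isa<>/cast<>/dyn_cast<> work on this hierarchy.
  static bool classof(const Node *N) { return N->Kind == NK_Const; }
};

// Before: ConstNode *C = llvm::dyn_cast<ConstNode>(N);
// After:  auto *C = llvm::dyn_cast<ConstNode>(N);
// Both declare the same type; dyn_cast still yields nullptr on a mismatch.
inline int64_t valueOrZero(Node *N) {
  if (auto *C = llvm::dyn_cast<ConstNode>(N))
    return C->Value;
  return 0;
}
// ---------------------------------------------------------------------------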
Index: lib/Target/AArch64/AArch64InstrInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64InstrInfo.cpp
+++ lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1259,7 +1259,7 @@
   MachineBasicBlock &MBB = *MI.getParent();
   DebugLoc DL = MI.getDebugLoc();
   unsigned Reg = MI.getOperand(0).getReg();
-  const GlobalValue *GV =
+  const auto *GV =
       cast<GlobalValue>((*MI.memoperands_begin())->getValue());
   const TargetMachine &TM = MBB.getParent()->getTarget();
   unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
Index: lib/Target/AArch64/AArch64PromoteConstant.cpp
===================================================================
--- lib/Target/AArch64/AArch64PromoteConstant.cpp
+++ lib/Target/AArch64/AArch64PromoteConstant.cpp
@@ -293,7 +293,7 @@
     return false;
   // Do not mess with inline asm.
-  const CallInst *CI = dyn_cast<CallInst>(Instr);
+  const auto *CI = dyn_cast<CallInst>(Instr);
   return !(CI && isa<InlineAsm>(CI->getCalledValue()));
 }
@@ -348,7 +348,7 @@
                                               unsigned OpNo) {
   // If this user is a phi, the insertion point is in the related
   // incoming basic block.
-  if (PHINode *PhiInst = dyn_cast<PHINode>(&User))
+  if (auto *PhiInst = dyn_cast<PHINode>(&User))
     return PhiInst->getIncomingBlock(OpNo)->getTerminator();
   return &User;
@@ -538,7 +538,7 @@
   // Traverse the operand, looking for constant vectors. Replace them by a
   // load of a global variable of constant vector type.
   for (Use &U : I.operands()) {
-    Constant *Cst = dyn_cast<Constant>(U);
+    auto *Cst = dyn_cast<Constant>(U);
     // There is no point in promoting global values as they are already
     // global. Do not promote constant expressions either, as they may
     // require some code expansion.
Index: lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
+++ lib/Target/AArch64/AArch64SelectionDAGInfo.cpp
@@ -21,8 +21,8 @@
     SDValue Size, unsigned Align, bool isVolatile,
     MachinePointerInfo DstPtrInfo) const {
   // Check to see if there is a specialized entry-point for memory zeroing.
-  ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src);
-  ConstantSDNode *SizeValue = dyn_cast<ConstantSDNode>(Size);
+  auto *V = dyn_cast<ConstantSDNode>(Src);
+  auto *SizeValue = dyn_cast<ConstantSDNode>(Size);
   const AArch64Subtarget &STI =
       DAG.getMachineFunction().getSubtarget<AArch64Subtarget>();
   const char *bzeroEntry =
Index: lib/Target/AArch64/AArch64TargetTransformInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -554,7 +554,7 @@
   case Intrinsic::aarch64_neon_st3:
   case Intrinsic::aarch64_neon_st4: {
     // Create a struct type
-    StructType *ST = dyn_cast<StructType>(ExpectedType);
+    auto *ST = dyn_cast<StructType>(ExpectedType);
     if (!ST)
       return nullptr;
     unsigned NumElts = Inst->getNumArgOperands() - 1;
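// ---------------------------------------------------------------------------
// A quick note on why these rewrites are behavior-preserving: `auto *`
// deduces exactly the pointer type the old declarations spelled out, and
// `const auto *` keeps the constness. A self-contained check in standard
// C++; `MCExprLike` is a hypothetical stand-in:
#include <type_traits>

struct MCExprLike { long Value = 0; };

inline const MCExprLike *getImmLike(const MCExprLike &E) { return &E; }

inline void deductionCheck() {
  MCExprLike E;
  const auto *MCE = getImmLike(E); // deduced as const MCExprLike *
  static_assert(std::is_same<decltype(MCE), const MCExprLike *>::value,
                "auto * preserves both the pointee type and constness");
  (void)MCE;
}
// ---------------------------------------------------------------------------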
Index: lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
===================================================================
--- lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -443,7 +443,7 @@
   bool isSImm9() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -452,7 +452,7 @@
   bool isSImm7s4() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -461,7 +461,7 @@
   bool isSImm7s8() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -470,7 +470,7 @@
   bool isSImm7s16() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -514,7 +514,7 @@
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return isSymbolicUImm12Offset(getImm(), Scale);
@@ -525,7 +525,7 @@
   bool isImm0_1() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
@@ -534,7 +534,7 @@
   bool isImm0_7() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -543,7 +543,7 @@
   bool isImm1_8() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -552,7 +552,7 @@
   bool isImm0_15() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -561,7 +561,7 @@
   bool isImm1_16() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -570,7 +570,7 @@
   bool isImm0_31() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -579,7 +579,7 @@
   bool isImm1_31() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -588,7 +588,7 @@
   bool isImm1_32() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -597,7 +597,7 @@
   bool isImm0_63() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -606,7 +606,7 @@
   bool isImm1_63() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -615,7 +615,7 @@
   bool isImm1_64() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -624,7 +624,7 @@
   bool isImm0_127() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -633,7 +633,7 @@
   bool isImm0_255() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -642,7 +642,7 @@
   bool isImm0_65535() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -651,7 +651,7 @@
   bool isImm32_63() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -660,7 +660,7 @@
   bool isLogicalImm32() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = MCE->getValue();
@@ -672,7 +672,7 @@
   bool isLogicalImm64() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
@@ -680,7 +680,7 @@
   bool isLogicalImm32Not() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
@@ -689,7 +689,7 @@
   bool isLogicalImm64Not() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
@@ -753,14 +753,14 @@
       Expr = getImm();
     // Otherwise it should be a real negative immediate in range:
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
+    const auto *CE = dyn_cast<MCConstantExpr>(Expr);
     return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
   }
   bool isCondCode() const { return Kind == k_CondCode; }
   bool isSIMDImmType10() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return false;
     return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
@@ -768,7 +768,7 @@
   bool isBranchTarget26() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return true;
     int64_t Val = MCE->getValue();
@@ -779,7 +779,7 @@
   bool isPCRelLabel19() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return true;
     int64_t Val = MCE->getValue();
@@ -790,7 +790,7 @@
   bool isBranchTarget14() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       return true;
     int64_t Val = MCE->getValue();
@@ -870,7 +870,7 @@
   bool isMOVZMovAlias() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *CE = dyn_cast<MCConstantExpr>(getImm());
     if (!CE)
       return false;
     uint64_t Value = CE->getValue();
@@ -881,7 +881,7 @@
   bool isMOVNMovAlias() const {
     if (!isImm())
       return false;
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *CE = dyn_cast<MCConstantExpr>(getImm());
     if (!CE)
       return false;
     uint64_t Value = CE->getValue();
@@ -1126,7 +1126,7 @@
     if (!isImm())
       return false;
-    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
+    if (const auto *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
       int64_t Val = CE->getValue();
       int64_t Min = - (4096 * (1LL << (21 - 1)));
       int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
@@ -1142,7 +1142,7 @@
     if (!isImm())
       return false;
-    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
+    if (const auto *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
       int64_t Val = CE->getValue();
       int64_t Min = - (1LL << (21 - 1));
       int64_t Max = ((1LL << (21 - 1)) - 1);
@@ -1156,7 +1156,7 @@
     // Add as immediates when possible.  Null MCExpr = 0.
     if (!Expr)
       Inst.addOperand(MCOperand::createImm(0));
-    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
+    else if (const auto *CE = dyn_cast<MCConstantExpr>(Expr))
       Inst.addOperand(MCOperand::createImm(CE->getValue()));
     else
       Inst.addOperand(MCOperand::createExpr(Expr));
@@ -1272,7 +1272,7 @@
     assert(N == 2 && "Invalid number of operands!");
     const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
-    const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
+    const auto *CE = cast<MCConstantExpr>(MCE);
     int64_t Val = -CE->getValue();
     unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
@@ -1287,7 +1287,7 @@
   void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE)
       addExpr(Inst, getImm());
     else
@@ -1301,7 +1301,7 @@
   template <int Scale>
   void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE) {
       Inst.addOperand(MCOperand::createExpr(getImm()));
@@ -1312,122 +1312,122 @@
   void addSImm9Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
   void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
   }
 
   void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
   }
 
   void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
   }
 
   void addImm0_1Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
   void addImm0_7Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
   void addImm1_8Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
   void addImm0_15Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
   void addImm1_16Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     assert(MCE && "Invalid constant immediate operand!");
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
   void addImm0_31Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
   void addImm1_31Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
   void addImm1_32Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
   void addImm0_63Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
   void addImm1_63Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
   void addImm1_64Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
   void addImm0_127Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
   void addImm0_255Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
   void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
   void addImm32_63Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     Inst.addOperand(MCOperand::createImm(MCE->getValue()));
   }
 
   void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     uint64_t encoding =
         AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
     Inst.addOperand(MCOperand::createImm(encoding));
@@ -1435,14 +1435,14 @@
   void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
     Inst.addOperand(MCOperand::createImm(encoding));
   }
 
   void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
     uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
     Inst.addOperand(MCOperand::createImm(encoding));
@@ -1450,7 +1450,7 @@
   void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     uint64_t encoding =
         AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
     Inst.addOperand(MCOperand::createImm(encoding));
@@ -1458,7 +1458,7 @@
   void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
+    const auto *MCE = cast<MCConstantExpr>(getImm());
     uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
     Inst.addOperand(MCOperand::createImm(encoding));
   }
@@ -1468,7 +1468,7 @@
     // here. If it's a label, however, just put it on directly as there's
     // not enough information now to do anything.
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE) {
       addExpr(Inst, getImm());
       return;
@@ -1482,7 +1482,7 @@
     // here. If it's a label, however, just put it on directly as there's
     // not enough information now to do anything.
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE) {
       addExpr(Inst, getImm());
       return;
@@ -1496,7 +1496,7 @@
     // here. If it's a label, however, just put it on directly as there's
     // not enough information now to do anything.
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *MCE = dyn_cast<MCConstantExpr>(getImm());
     if (!MCE) {
       addExpr(Inst, getImm());
       return;
@@ -1601,7 +1601,7 @@
   void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
+    const auto *CE = cast<MCConstantExpr>(getImm());
     uint64_t Value = CE->getValue();
     Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
   }
@@ -1610,7 +1610,7 @@
   void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
+    const auto *CE = cast<MCConstantExpr>(getImm());
     uint64_t Value = CE->getValue();
     Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
   }
@@ -2068,7 +2068,7 @@
   if (getParser().parseExpression(ImmVal))
     return MatchOperand_ParseFail;
-  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+  const auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
   if (!MCE) {
     TokError("immediate value expected for prefetch operand");
     return MatchOperand_ParseFail;
@@ -2267,7 +2267,7 @@
       return MatchOperand_ParseFail;
     else if (Parser.getTok().isNot(AsmToken::Comma)) {
       uint64_t ShiftAmount = 0;
-      const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
+      const auto *MCE = dyn_cast<MCConstantExpr>(Imm);
      if (MCE) {
        int64_t Val = MCE->getValue();
        if (Val > 0xfff && (Val & 0xfff) == 0) {
@@ -2425,7 +2425,7 @@
     if (getParser().parseExpression(ImmVal))
       return MatchOperand_ParseFail;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+    const auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
     if (!MCE) {
       Error(E, "expected constant '#imm' after shift specifier");
       return MatchOperand_ParseFail;
@@ -2713,7 +2713,7 @@
     SMLoc ExprLoc = getLoc();
     if (getParser().parseExpression(ImmVal))
       return MatchOperand_ParseFail;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+    const auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
     if (!MCE) {
       Error(ExprLoc, "immediate value expected for barrier operand");
       return MatchOperand_ParseFail;
@@ -2807,7 +2807,7 @@
     const MCExpr *ImmVal;
     if (getParser().parseExpression(ImmVal))
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+    const auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
     if (!MCE) {
       TokError("immediate value expected for vector index");
       return false;
@@ -3007,7 +3007,7 @@
     const MCExpr *ImmVal;
     if (getParser().parseExpression(ImmVal))
       return false;
-    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+    const auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
     if (!MCE) {
       TokError("immediate value expected for vector index");
       return false;
@@ -3690,7 +3690,7 @@
     AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
     AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
     if (Op2.isReg() && Op3.isImm()) {
-      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
+      const auto *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
       if (Op3CE) {
         uint64_t Op3Val = Op3CE->getValue();
         uint64_t NewOp3Val = 0;
@@ -3722,8 +3722,8 @@
     AArch64Operand &WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
     if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
-      const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
-      const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
+      const auto *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
+      const auto *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
       if (LSBCE && WidthCE) {
         uint64_t LSB = LSBCE->getValue();
@@ -3778,8 +3778,8 @@
     AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
     if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
-      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
-      const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
+      const auto *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
+      const auto *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
       if (Op3CE && Op4CE) {
         uint64_t Op3Val = Op3CE->getValue();
@@ -3842,8 +3842,8 @@
     AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
     if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
-      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
-      const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
+      const auto *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
+      const auto *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
       if (Op3CE && Op4CE) {
         uint64_t Op3Val = Op3CE->getValue();
@@ -4286,7 +4286,7 @@
   const MCExpr *Expr;
   if (check(getParser().parseExpression(Expr), L, "expected expression"))
     return true;
-  const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
+  const auto *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
   if (check(!Value, L, "expected constant expression"))
     return true;
   getTargetStreamer().emitInst(Value->getValue());
@@ -4433,19 +4433,19 @@
   DarwinRefKind = MCSymbolRefExpr::VK_None;
   Addend = 0;
-  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
+  if (const auto *AE = dyn_cast<AArch64MCExpr>(Expr)) {
     ELFRefKind = AE->getKind();
     Expr = AE->getSubExpr();
   }
 
-  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
+  const auto *SE = dyn_cast<MCSymbolRefExpr>(Expr);
   if (SE) {
     // It's a simple symbol reference with no addend.
     DarwinRefKind = SE->getKind();
     return true;
   }
 
-  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
+  const auto *BE = dyn_cast<MCBinaryExpr>(Expr);
   if (!BE)
     return false;
@@ -4540,7 +4540,7 @@
   }
   if (!Op.isImm())
     return Match_InvalidOperand;
-  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
+  const auto *CE = dyn_cast<MCConstantExpr>(Op.getImm());
   if (!CE)
     return Match_InvalidOperand;
   if (CE->getValue() == ExpectedVal)
Index: lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
===================================================================
--- lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
+++ lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp
@@ -1366,7 +1366,7 @@
   }
   // If the branch target is simply an address then print it in hex.
-  const MCConstantExpr *BranchTarget =
+  const auto *BranchTarget =
       dyn_cast<MCConstantExpr>(MI->getOperand(OpNum).getExpr());
   int64_t Address;
   if (BranchTarget && BranchTarget->evaluateAsAbsolute(Address)) {
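// ---------------------------------------------------------------------------
// The addMOVZMovAliasOperands/addMOVNMovAliasOperands hunks above reduce to
// plain shift-and-mask arithmetic; a standalone restatement under the same
// assumptions the parser has already validated (standard C++, no MC types):
#include <cstdint>

// MOVZ materializes the 16-bit chunk of Value sitting at bit position Shift.
inline uint64_t movzChunk(uint64_t Value, unsigned Shift) {
  return (Value >> Shift) & 0xffff;
}

// The MOVN alias encodes the bitwise complement of that chunk instead.
inline uint64_t movnChunk(uint64_t Value, unsigned Shift) {
  return (~Value >> Shift) & 0xffff;
}
// ---------------------------------------------------------------------------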
Index: lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
===================================================================
--- lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
+++ lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
@@ -272,7 +272,7 @@
   // Set the shift bit of the add instruction for relocation types
   // R_AARCH64_TLSLE_ADD_TPREL_HI12 and R_AARCH64_TLSLD_ADD_DTPREL_HI12.
-  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(Expr)) {
+  if (const auto *A64E = dyn_cast<AArch64MCExpr>(Expr)) {
     AArch64MCExpr::VariantKind RefKind = A64E->getKind();
     if (RefKind == AArch64MCExpr::VK_TPREL_HI12 ||
         RefKind == AArch64MCExpr::VK_DTPREL_HI12)
@@ -532,7 +532,7 @@
   if (UImm16MO.isImm())
     return EncodedValue;
-  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
+  const auto *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
   switch (A64E->getKind()) {
   case AArch64MCExpr::VK_DTPREL_G2:
   case AArch64MCExpr::VK_DTPREL_G1:
Index: lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
===================================================================
--- lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
+++ lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp
@@ -110,7 +110,7 @@
     break;
   case MCExpr::Binary: {
-    const MCBinaryExpr *BE = cast<MCBinaryExpr>(Expr);
+    const auto *BE = cast<MCBinaryExpr>(Expr);
     fixELFSymbolsInTLSFixupsImpl(BE->getLHS(), Asm);
     fixELFSymbolsInTLSFixupsImpl(BE->getRHS(), Asm);
     break;
Index: lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
===================================================================
--- lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
+++ lib/Target/AArch64/MCTargetDesc/AArch64MachObjectWriter.cpp
@@ -127,7 +127,7 @@
     // But only if they don't point to a few forbidden sections.
     if (!Symbol.isInSection())
       return true;
-    const MCSectionMachO &RefSec = cast<MCSectionMachO>(Symbol.getSection());
+    const auto &RefSec = cast<MCSectionMachO>(Symbol.getSection());
     if (RefSec.getType() == MachO::S_CSTRING_LITERALS)
       return false;
Index: lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
+++ lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp
@@ -39,7 +39,7 @@
   std::vector<Function *> FuncsToClone;
   for (GlobalAlias &A : M.aliases()) {
-    if (Function *F = dyn_cast<Function>(A.getAliasee())) {
+    if (auto *F = dyn_cast<Function>(A.getAliasee())) {
       A.replaceAllUsesWith(F);
       AliasesToRemove.push_back(&A);
     }
Index: lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
+++ lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
@@ -120,7 +120,7 @@
   for (const BasicBlock &BB : F) {
     for (const Instruction &I : BB) {
-      if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
+      if (const auto *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
        if (castRequiresQueuePtr(ASC))
          return true;
      }
@@ -145,7 +145,7 @@
   for (User *U : Intrin->users()) {
     // CallInst is the only valid user for an intrinsic.
-    CallInst *CI = cast<CallInst>(U);
+    auto *CI = cast<CallInst>(U);
     Function *CallingFunction = CI->getParent()->getParent();
     if (SeenFuncs.insert(CallingFunction).second)
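// ---------------------------------------------------------------------------
// fixELFSymbolsInTLSFixupsImpl above recurses structurally through
// MCBinaryExpr operands. The same shape in miniature (standard C++;
// ExprNode is a hypothetical stand-in for MCExpr):
#include <functional>

struct ExprNode {
  ExprNode *LHS = nullptr, *RHS = nullptr; // both null for a leaf
  bool IsTLS = false;
};

// Visits every leaf exactly once, the way the MC walker marks TLS symbols.
inline void forEachLeaf(ExprNode *E,
                        const std::function<void(ExprNode &)> &F) {
  if (!E)
    return;
  if (!E->LHS && !E->RHS) {
    F(*E);
    return;
  }
  forEachLeaf(E->LHS, F);
  forEachLeaf(E->RHS, F);
}
// ---------------------------------------------------------------------------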
Index: lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
+++ lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
@@ -138,7 +138,7 @@
   // Thus we can ensure that memory not clobbered for memory
   // operations that live in kernel only.
   bool NotClobbered = isKernelFunc && !isClobberedInFunction(&I);
-  Instruction *PtrI = dyn_cast<Instruction>(Ptr);
+  auto *PtrI = dyn_cast<Instruction>(Ptr);
   if (!PtrI && NotClobbered && isGlobalLoad(I)) {
     if (isa<Argument>(Ptr) || isa<GlobalValue>(Ptr)) {
       // Lookup for the existing GEP
Index: lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -146,7 +146,7 @@
 Value *AMDGPUCodeGenPrepare::copyFlags(
     const BinaryOperator &I, Value *V) const {
-  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(V);
+  auto *BinOp = dyn_cast<BinaryOperator>(V);
   if (!BinOp) // Possibly constant expression.
     return V;
@@ -310,7 +310,7 @@
 }
 static bool shouldKeepFDivF32(Value *Num, bool UnsafeDiv) {
-  const ConstantFP *CNum = dyn_cast<ConstantFP>(Num);
+  const auto *CNum = dyn_cast<ConstantFP>(Num);
   if (!CNum)
     return false;
@@ -332,7 +332,7 @@
   if (!FPMath)
     return false;
-  const FPMathOperator *FPOp = cast<FPMathOperator>(&FDiv);
+  const auto *FPOp = cast<FPMathOperator>(&FDiv);
   float ULP = FPOp->getFPAccuracy();
   if (ULP < 2.5f)
     return false;
@@ -356,7 +356,7 @@
   Value *NewFDiv = nullptr;
-  if (VectorType *VT = dyn_cast<VectorType>(Ty)) {
+  if (auto *VT = dyn_cast<VectorType>(Ty)) {
     NewFDiv = UndefValue::get(VT);
     // FIXME: Doesn't do the right thing for cases where the vector is partially
Index: lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -191,10 +191,10 @@
   const SIInstrInfo *TII =
       static_cast<const SISubtarget *>(Subtarget)->getInstrInfo();
-  if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
+  if (const auto *C = dyn_cast<ConstantSDNode>(N))
     return TII->isInlineConstant(C->getAPIntValue());
-  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
+  if (const auto *C = dyn_cast<ConstantFPSDNode>(N))
     return TII->isInlineConstant(C->getValueAPF().bitcastToAPInt());
 
   return false;
@@ -436,10 +436,10 @@
     break;
   uint64_t Imm;
-  if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
+  if (auto *FP = dyn_cast<ConstantFPSDNode>(N))
     Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
   else {
-    ConstantSDNode *C = cast<ConstantSDNode>(N);
+    auto *C = cast<ConstantSDNode>(N);
     Imm = C->getZExtValue();
   }
@@ -478,11 +478,11 @@
   // TODO: Technically we could try to pattern match scalar bitshifts of
   // dynamic values, but it's probably not useful.
-  ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  auto *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
   if (!Offset)
     break;
 
-  ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
+  auto *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
   if (!Width)
     break;
@@ -553,7 +553,7 @@
 bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                          SDValue& IntPtr) {
-  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
+  if (auto *Cst = dyn_cast<ConstantSDNode>(Addr)) {
     IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, SDLoc(Addr),
                                        true);
     return true;
@@ -754,7 +754,7 @@
   if (CurDAG->isBaseWithConstantOffset(Addr)) {
     SDValue N0 = Addr.getOperand(0);
     SDValue N1 = Addr.getOperand(1);
-    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
+    auto *C1 = cast<ConstantSDNode>(N1);
     if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
       // (add n0, c0)
       Base = N0;
@@ -763,7 +763,7 @@
     }
   } else if (Addr.getOpcode() == ISD::SUB) {
     // sub C, x -> add (sub 0, x), C
-    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
+    if (const auto *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
       int64_t ByteOffset = C->getSExtValue();
       if (isUInt<16>(ByteOffset)) {
         SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
@@ -785,7 +785,7 @@
       }
     }
-  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
+  } else if (const auto *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
     // If we have a constant address, prefer to put the constant into the
     // offset. This can save moves to load the constant address since multiple
     // operations can share the zero base address register, and enables merging
@@ -818,7 +818,7 @@
   if (CurDAG->isBaseWithConstantOffset(Addr)) {
     SDValue N0 = Addr.getOperand(0);
     SDValue N1 = Addr.getOperand(1);
-    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
+    auto *C1 = cast<ConstantSDNode>(N1);
     unsigned DWordOffset0 = C1->getZExtValue() / 4;
     unsigned DWordOffset1 = DWordOffset0 + 1;
     // (add n0, c0)
@@ -830,7 +830,7 @@
     }
   } else if (Addr.getOpcode() == ISD::SUB) {
     // sub C, x -> add (sub 0, x), C
-    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
+    if (const auto *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
       unsigned DWordOffset0 = C->getZExtValue() / 4;
       unsigned DWordOffset1 = DWordOffset0 + 1;
@@ -856,7 +856,7 @@
       }
     }
-  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
+  } else if (const auto *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
     unsigned DWordOffset0 = CAddr->getZExtValue() / 4;
     unsigned DWordOffset1 = DWordOffset0 + 1;
     assert(4 * DWordOffset0 == CAddr->getZExtValue());
@@ -913,7 +913,7 @@
   if (CurDAG->isBaseWithConstantOffset(Addr)) {
     SDValue N0 = Addr.getOperand(0);
     SDValue N1 = Addr.getOperand(1);
-    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
+    auto *C1 = cast<ConstantSDNode>(N1);
     if (N0.getOpcode() == ISD::ADD) {
       // (add (add N2, N3), C1) -> addr64
@@ -976,7 +976,7 @@
                               GLC, SLC, TFE))
     return false;
-  ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
+  auto *C = cast<ConstantSDNode>(Addr64);
   if (C->getSExtValue()) {
     SDLoc DL(Addr);
@@ -1023,7 +1023,7 @@
     SDValue N1 = Addr.getOperand(1);
     // Offsets in vaddr must be positive.
-    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
+    auto *C1 = cast<ConstantSDNode>(N1);
     if (isLegalMUBUFImmOffset(C1)) {
       VAddr = foldFrameIndex(N0);
       ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
@@ -1192,7 +1192,7 @@
                                           SDValue &Offset, bool &Imm) const {
   // FIXME: Handle non-constant offsets.
-  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ByteOffsetNode);
+  auto *C = dyn_cast<ConstantSDNode>(ByteOffsetNode);
   if (!C)
     return false;
@@ -1300,7 +1300,7 @@
   if (CurDAG->isBaseWithConstantOffset(Index)) {
     SDValue N0 = Index.getOperand(0);
     SDValue N1 = Index.getOperand(1);
-    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
+    auto *C1 = cast<ConstantSDNode>(N1);
     // (add n0, c0)
     Base = N0;
@@ -1334,8 +1334,8 @@
   // Predicate: 0 < b <= c < 32
   const SDValue &Shl = N->getOperand(0);
-  ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
-  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  auto *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
+  auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
   if (B && C) {
     uint32_t BVal = B->getZExtValue();
@@ -1360,8 +1360,8 @@
   // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
   // Predicate: isMask(mask)
   const SDValue &Srl = N->getOperand(0);
-  ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
-  ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  auto *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
+  auto *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));
   if (Shift && Mask) {
     uint32_t ShiftVal = Shift->getZExtValue();
@@ -1382,8 +1382,8 @@
   // "(a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)"
   // Predicate: isMask(mask >> b)
   const SDValue &And = N->getOperand(0);
-  ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
-  ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));
+  auto *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
+  auto *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));
   if (Shift && Mask) {
     uint32_t ShiftVal = Shift->getZExtValue();
@@ -1415,7 +1415,7 @@
   if (Src.getOpcode() != ISD::SRL)
     break;
-  const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
+  const auto *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
   if (!Amt)
     break;
@@ -1475,7 +1475,7 @@
 // This is here because there isn't a way to use the generated sub0_sub1 as the
 // subreg index to EXTRACT_SUBREG in tablegen.
 void AMDGPUDAGToDAGISel::SelectATOMIC_CMP_SWAP(SDNode *N) {
-  MemSDNode *Mem = cast<MemSDNode>(N);
+  auto *Mem = cast<MemSDNode>(N);
   unsigned AS = Mem->getAddressSpace();
   if (AS == AMDGPUAS::FLAT_ADDRESS) {
     SelectCode(N);
@@ -1611,7 +1611,7 @@
     IsModified = false;
     // Go over all selected nodes and try to fold them a bit more
     for (SDNode &Node : CurDAG->allnodes()) {
-      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(&Node);
+      auto *MachineNode = dyn_cast<MachineSDNode>(&Node);
       if (!MachineNode)
         continue;
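// ---------------------------------------------------------------------------
// The BFE folds above rest on two bit tricks: a contiguous run of low bits
// equals (1 << n) - 1, and popcount(mask) recovers n. A standalone reference
// for the arithmetic (standard C++; bfeU32Reference is a hypothetical model
// of the BFE_U32 operation, not a compiler API):
#include <bitset>
#include <cstdint>

// Extracts Width bits of A starting at bit Offset, i.e. "(a srl b) & mask".
inline uint32_t bfeU32Reference(uint32_t A, uint32_t Offset, uint32_t Width) {
  if (Width == 0)
    return 0;
  uint32_t Mask = Width < 32 ? (1u << Width) - 1 : ~0u;
  return (A >> Offset) & Mask;
}

// For a low-bit mask (as the isMask predicate above checks), popcount gives
// back the field width that the fold encodes into the BFE operand.
inline uint32_t widthFromMask(uint32_t Mask) {
  return static_cast<uint32_t>(std::bitset<32>(Mask).count());
}
// ---------------------------------------------------------------------------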
- const ConstantSDNode *RHS = dyn_cast(N->getOperand(1)); + const auto *RHS = dyn_cast(N->getOperand(1)); if (!RHS) return SDValue(); @@ -2424,7 +2424,7 @@ if (N->getValueType(0) != MVT::i64) return SDValue(); - const ConstantSDNode *RHS = dyn_cast(N->getOperand(1)); + const auto *RHS = dyn_cast(N->getOperand(1)); if (!RHS) return SDValue(); @@ -2459,7 +2459,7 @@ if (N->getValueType(0) != MVT::i64) return SDValue(); - const ConstantSDNode *RHS = dyn_cast(N->getOperand(1)); + const auto *RHS = dyn_cast(N->getOperand(1)); if (!RHS) return SDValue(); @@ -2621,7 +2621,7 @@ } static bool isNegativeOne(SDValue Val) { - if (ConstantSDNode *C = dyn_cast(Val)) + if (auto *C = dyn_cast(Val)) return C->isAllOnesValue(); return false; } @@ -2659,7 +2659,7 @@ SDValue AMDGPUTargetLowering::performCtlzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS, SDValue RHS, DAGCombinerInfo &DCI) const { - ConstantSDNode *CmpRhs = dyn_cast(Cond.getOperand(1)); + auto *CmpRhs = dyn_cast(Cond.getOperand(1)); if (!CmpRhs || !CmpRhs->isNullValue()) return SDValue(); @@ -2757,7 +2757,7 @@ // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k) // TODO: Generalize and move to DAGCombiner SDValue Src = N->getOperand(0); - if (ConstantSDNode *C = dyn_cast(Src)) { + if (auto *C = dyn_cast(Src)) { assert(Src.getValueType() == MVT::i64); SDLoc SL(N); uint64_t CVal = C->getZExtValue(); @@ -2766,7 +2766,7 @@ DAG.getConstant(Hi_32(CVal), SL, MVT::i32)); } - if (ConstantFPSDNode *C = dyn_cast(Src)) { + if (auto *C = dyn_cast(Src)) { const APInt &Val = C->getValueAPF().bitcastToAPInt(); SDLoc SL(N); uint64_t CVal = Val.getZExtValue(); @@ -2821,7 +2821,7 @@ case AMDGPUISD::BFE_U32: { assert(!N->getValueType(0).isVector() && "Vector handling of BFE not implemented"); - ConstantSDNode *Width = dyn_cast(N->getOperand(2)); + auto *Width = dyn_cast(N->getOperand(2)); if (!Width) break; @@ -2829,7 +2829,7 @@ if (WidthVal == 0) return DAG.getConstant(0, DL, MVT::i32); - ConstantSDNode *Offset = dyn_cast(N->getOperand(1)); + auto *Offset = dyn_cast(N->getOperand(1)); if (!Offset) break; @@ -2861,7 +2861,7 @@ return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT); } - if (ConstantSDNode *CVal = dyn_cast(BitsFrom)) { + if (auto *CVal = dyn_cast(BitsFrom)) { if (Signed) { return constantFoldBFE(DAG, CVal->getSExtValue(), @@ -3103,7 +3103,7 @@ case AMDGPUISD::BFE_I32: case AMDGPUISD::BFE_U32: { - ConstantSDNode *CWidth = dyn_cast(Op.getOperand(2)); + auto *CWidth = dyn_cast(Op.getOperand(2)); if (!CWidth) return; @@ -3124,7 +3124,7 @@ unsigned Depth) const { switch (Op.getOpcode()) { case AMDGPUISD::BFE_I32: { - ConstantSDNode *Width = dyn_cast(Op.getOperand(2)); + auto *Width = dyn_cast(Op.getOperand(2)); if (!Width) return 1; @@ -3138,7 +3138,7 @@ } case AMDGPUISD::BFE_U32: { - ConstantSDNode *Width = dyn_cast(Op.getOperand(2)); + auto *Width = dyn_cast(Op.getOperand(2)); return Width ? 
32 - (Width->getZExtValue() & 0x1f) : 1; } Index: lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp +++ lib/Target/AMDGPU/AMDGPUIntrinsicInfo.cpp @@ -94,7 +94,7 @@ Function *AMDGPUIntrinsicInfo::getDeclaration(Module *M, unsigned IntrID, ArrayRef Tys) const { FunctionType *FTy = getType(M->getContext(), IntrID, Tys); - Function *F + auto *F = cast(M->getOrInsertFunction(getName(IntrID, Tys), FTy)); AttributeSet AS = getAttributes(M->getContext(), Index: lib/Target/AMDGPU/AMDGPUOpenCLImageTypeLoweringPass.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPUOpenCLImageTypeLoweringPass.cpp +++ lib/Target/AMDGPU/AMDGPUOpenCLImageTypeLoweringPass.cpp @@ -88,7 +88,7 @@ // Sanity checks. size_t ExpectNumArgNodeOps = F->arg_size() + 1; for (size_t i = 0; i < NumKernelArgMDNodes; ++i) { - MDNode *ArgNode = dyn_cast_or_null(Node->getOperand(i + 1)); + auto *ArgNode = dyn_cast_or_null(Node->getOperand(i + 1)); if (ArgNode->getNumOperands() != ExpectNumArgNodeOps) return nullptr; if (!ArgNode->getOperand(0)) @@ -96,7 +96,7 @@ // FIXME: It should be possible to do image lowering when some metadata // args missing or not in the expected order. - MDString *StringNode = dyn_cast(ArgNode->getOperand(0)); + auto *StringNode = dyn_cast(ArgNode->getOperand(0)); if (!StringNode || StringNode->getString() != KernelArgMDNodeNames[i]) return nullptr; } @@ -106,13 +106,13 @@ static StringRef AccessQualFromMD(MDNode *KernelMDNode, unsigned ArgIdx) { - MDNode *ArgAQNode = cast(KernelMDNode->getOperand(2)); + auto *ArgAQNode = cast(KernelMDNode->getOperand(2)); return cast(ArgAQNode->getOperand(ArgIdx + 1))->getString(); } static StringRef ArgTypeFromMD(MDNode *KernelMDNode, unsigned ArgIdx) { - MDNode *ArgTypeNode = cast(KernelMDNode->getOperand(3)); + auto *ArgTypeNode = cast(KernelMDNode->getOperand(3)); return cast(ArgTypeNode->getOperand(ArgIdx + 1))->getString(); } @@ -120,7 +120,7 @@ GetArgMD(MDNode *KernelMDNode, unsigned OpIdx) { MDVector Res; for (unsigned i = 0; i < NumKernelArgMDNodes; ++i) { - MDNode *Node = cast(KernelMDNode->getOperand(i + 1)); + auto *Node = cast(KernelMDNode->getOperand(i + 1)); Res.push_back(Node->getOperand(OpIdx)); } return Res; Index: lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp +++ lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp @@ -132,7 +132,7 @@ // possible these arguments require the entire local memory space, so // we cannot use local memory in the pass. for (Type *ParamTy : FTy->params()) { - PointerType *PtrTy = dyn_cast(ParamTy); + auto *PtrTy = dyn_cast(ParamTy); if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) { LocalMemLimit = 0; DEBUG(dbgs() << "Function has local memory argument. Promoting to " @@ -154,7 +154,7 @@ continue; for (const User *U : GV.users()) { - const Instruction *Use = dyn_cast(U); + const auto *Use = dyn_cast(U); if (!Use) continue; @@ -214,7 +214,7 @@ BasicBlock &EntryBB = *F.begin(); for (auto I = EntryBB.begin(), E = EntryBB.end(); I != E; ) { - AllocaInst *AI = dyn_cast(I); + auto *AI = dyn_cast(I); ++I; if (AI) @@ -345,7 +345,7 @@ static Value * calculateVectorIndex(Value *Ptr, const std::map &GEPIdx) { - GetElementPtrInst *GEP = cast(Ptr); + auto *GEP = cast(Ptr); auto I = GEPIdx.find(GEP); return I == GEPIdx.end() ? 
nullptr : I->second; @@ -356,7 +356,7 @@ if (GEP->getNumOperands() != 3) return nullptr; - ConstantInt *I0 = dyn_cast(GEP->getOperand(1)); + auto *I0 = dyn_cast(GEP->getOperand(1)); if (!I0 || !I0->isZero()) return nullptr; @@ -375,7 +375,7 @@ return true; case Instruction::Store: { // Must be the stored pointer operand, not a stored value. - StoreInst *SI = cast(Inst); + auto *SI = cast(Inst); return SI->getPointerOperand() == User; } default: @@ -384,7 +384,7 @@ } static bool tryPromoteAllocaToVector(AllocaInst *Alloca) { - ArrayType *AllocaTy = dyn_cast(Alloca->getAllocatedType()); + auto *AllocaTy = dyn_cast(Alloca->getAllocatedType()); DEBUG(dbgs() << "Alloca candidate for vectorization\n"); @@ -401,7 +401,7 @@ std::map GEPVectorIdx; std::vector WorkList; for (User *AllocaUser : Alloca->users()) { - GetElementPtrInst *GEP = dyn_cast(AllocaUser); + auto *GEP = dyn_cast(AllocaUser); if (!GEP) { if (!canVectorizeInst(cast(AllocaUser), Alloca)) return false; @@ -434,7 +434,7 @@ << *AllocaTy << " -> " << *VectorTy << '\n'); for (Value *V : WorkList) { - Instruction *Inst = cast(V); + auto *Inst = cast(V); IRBuilder<> Builder(Inst); switch (Inst->getOpcode()) { case Instruction::Load: { @@ -475,7 +475,7 @@ } static bool isCallPromotable(CallInst *CI) { - IntrinsicInst *II = dyn_cast(CI); + auto *II = dyn_cast(CI); if (!II) return false; @@ -535,7 +535,7 @@ if (is_contained(WorkList, User)) continue; - if (CallInst *CI = dyn_cast(User)) { + if (auto *CI = dyn_cast(User)) { if (!isCallPromotable(CI)) return false; @@ -543,35 +543,35 @@ continue; } - Instruction *UseInst = cast(User); + auto *UseInst = cast(User); if (UseInst->getOpcode() == Instruction::PtrToInt) return false; - if (LoadInst *LI = dyn_cast(UseInst)) { + if (auto *LI = dyn_cast(UseInst)) { if (LI->isVolatile()) return false; continue; } - if (StoreInst *SI = dyn_cast(UseInst)) { + if (auto *SI = dyn_cast(UseInst)) { if (SI->isVolatile()) return false; // Reject if the stored value is not the pointer operand. if (SI->getPointerOperand() != Val) return false; - } else if (AtomicRMWInst *RMW = dyn_cast(UseInst)) { + } else if (auto *RMW = dyn_cast(UseInst)) { if (RMW->isVolatile()) return false; - } else if (AtomicCmpXchgInst *CAS = dyn_cast(UseInst)) { + } else if (auto *CAS = dyn_cast(UseInst)) { if (CAS->isVolatile()) return false; } // Only promote a select if we know that the other select operand // is from another pointer that will also be promoted. - if (ICmpInst *ICmp = dyn_cast(UseInst)) { + if (auto *ICmp = dyn_cast(UseInst)) { if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1)) return false; @@ -588,7 +588,7 @@ if (!User->getType()->isPointerTy()) continue; - if (GetElementPtrInst *GEP = dyn_cast(UseInst)) { + if (auto *GEP = dyn_cast(UseInst)) { // Be conservative if an address could be computed outside the bounds of // the alloca. if (!GEP->isInBounds()) @@ -597,13 +597,13 @@ // Only promote a select if we know that the other select operand is from // another pointer that will also be promoted. - if (SelectInst *SI = dyn_cast(UseInst)) { + if (auto *SI = dyn_cast(UseInst)) { if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2)) return false; } // Repeat for phis. - if (PHINode *Phi = dyn_cast(UseInst)) { + if (auto *Phi = dyn_cast(UseInst)) { // TODO: Handle more complex cases. We should be able to replace loops // over arrays. 
switch (Phi->getNumIncomingValues()) { @@ -729,9 +729,9 @@ I.eraseFromParent(); for (Value *V : WorkList) { - CallInst *Call = dyn_cast(V); + auto *Call = dyn_cast(V); if (!Call) { - if (ICmpInst *CI = dyn_cast(V)) { + if (auto *CI = dyn_cast(V)) { Value *Src0 = CI->getOperand(0); Type *EltTy = Src0->getType()->getPointerElementType(); PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS); @@ -758,13 +758,13 @@ V->mutateType(NewTy); // Adjust the types of any constant operands. - if (SelectInst *SI = dyn_cast(V)) { + if (auto *SI = dyn_cast(V)) { if (isa(SI->getOperand(1))) SI->setOperand(1, ConstantPointerNull::get(NewTy)); if (isa(SI->getOperand(2))) SI->setOperand(2, ConstantPointerNull::get(NewTy)); - } else if (PHINode *Phi = dyn_cast(V)) { + } else if (auto *Phi = dyn_cast(V)) { for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) { if (isa(Phi->getIncomingValue(I))) Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy)); @@ -774,7 +774,7 @@ continue; } - IntrinsicInst *Intr = cast(Call); + auto *Intr = cast(Call); Builder.SetInsertPoint(Intr); switch (Intr->getIntrinsicID()) { case Intrinsic::lifetime_start: @@ -783,7 +783,7 @@ Intr->eraseFromParent(); continue; case Intrinsic::memcpy: { - MemCpyInst *MemCpy = cast(Intr); + auto *MemCpy = cast(Intr); Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getRawSource(), MemCpy->getLength(), MemCpy->getAlignment(), MemCpy->isVolatile()); @@ -791,7 +791,7 @@ continue; } case Intrinsic::memmove: { - MemMoveInst *MemMove = cast(Intr); + auto *MemMove = cast(Intr); Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getRawSource(), MemMove->getLength(), MemMove->getAlignment(), MemMove->isVolatile()); @@ -799,7 +799,7 @@ continue; } case Intrinsic::memset: { - MemSetInst *MemSet = cast(Intr); + auto *MemSet = cast(Intr); Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(), MemSet->getLength(), MemSet->getAlignment(), MemSet->isVolatile()); Index: lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp +++ lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp @@ -41,12 +41,12 @@ for (const BasicBlock *BB : L->getBlocks()) { const DataLayout &DL = BB->getModule()->getDataLayout(); for (const Instruction &I : *BB) { - const GetElementPtrInst *GEP = dyn_cast(&I); + const auto *GEP = dyn_cast(&I); if (!GEP || GEP->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) continue; const Value *Ptr = GEP->getPointerOperand(); - const AllocaInst *Alloca = + const auto *Alloca = dyn_cast(GetUnderlyingObject(Ptr, DL)); if (Alloca) { // We want to do whatever we can to limit the number of alloca @@ -307,7 +307,7 @@ /// different across workitems in a wavefront. bool AMDGPUTTIImpl::isSourceOfDivergence(const Value *V) const { - if (const Argument *A = dyn_cast(V)) + if (const auto *A = dyn_cast(V)) return !isArgPassedInSGPR(A); // Loads from the private address space are divergent, because threads @@ -316,7 +316,7 @@ // // All other loads are not divergent, because if threads issue loads with the // same arguments, they will always get the same result. 
- if (const LoadInst *Load = dyn_cast(V)) + if (const auto *Load = dyn_cast(V)) return Load->getPointerAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS; // Atomics are divergent because they are executed sequentially: when an @@ -326,7 +326,7 @@ if (isa(V) || isa(V)) return true; - if (const IntrinsicInst *Intrinsic = dyn_cast(V)) { + if (const auto *Intrinsic = dyn_cast(V)) { const TargetMachine &TM = getTLI()->getTargetMachine(); return isIntrinsicSourceOfDivergence(TM.getIntrinsicInfo(), Intrinsic); } Index: lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp =================================================================== --- lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp +++ lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp @@ -419,7 +419,7 @@ StringRef getExpressionAsToken() const { assert(isExpr()); - const MCSymbolRefExpr *S = cast(Expr); + const auto *S = cast(Expr); return S->getSymbol().getName(); } Index: lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp =================================================================== --- lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp +++ lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp @@ -174,7 +174,7 @@ void AMDGPUTargetELFStreamer::EmitAMDGPUSymbolType(StringRef SymbolName, unsigned Type) { - MCSymbolELF *Symbol = cast( + auto *Symbol = cast( getStreamer().getContext().getOrCreateSymbol(SymbolName)); Symbol->setType(ELF::STT_AMDGPU_HSA_KERNEL); } @@ -182,7 +182,7 @@ void AMDGPUTargetELFStreamer::EmitAMDGPUHsaModuleScopeGlobal( StringRef GlobalName) { - MCSymbolELF *Symbol = cast( + auto *Symbol = cast( getStreamer().getContext().getOrCreateSymbol(GlobalName)); Symbol->setType(ELF::STT_OBJECT); Symbol->setBinding(ELF::STB_LOCAL); @@ -191,7 +191,7 @@ void AMDGPUTargetELFStreamer::EmitAMDGPUHsaProgramScopeGlobal( StringRef GlobalName) { - MCSymbolELF *Symbol = cast( + auto *Symbol = cast( getStreamer().getContext().getOrCreateSymbol(GlobalName)); Symbol->setType(ELF::STT_OBJECT); Symbol->setBinding(ELF::STB_GLOBAL); @@ -280,7 +280,7 @@ } } case Type::VectorTyID: { - VectorType *VecTy = cast(Ty); + auto *VecTy = cast(Ty); Type *EleTy = VecTy->getElementType(); unsigned Size = VecTy->getVectorNumElements(); return (Twine(getOCLTypeName(EleTy, Signed)) + Twine(Size)).str(); Index: lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp =================================================================== --- lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp +++ lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp @@ -201,7 +201,7 @@ int64_t Imm; if (MO.isExpr()) { - const MCConstantExpr *C = dyn_cast(MO.getExpr()); + const auto *C = dyn_cast(MO.getExpr()); if (!C) return 255; @@ -263,7 +263,7 @@ if (Op.isImm()) Imm = Op.getImm(); else if (Op.isExpr()) { - if (const MCConstantExpr *C = dyn_cast(Op.getExpr())) + if (const auto *C = dyn_cast(Op.getExpr())) Imm = C->getValue(); } else if (!Op.isExpr()) // Exprs will be replaced with a fixup value. 
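The hunks above are all instances of one mechanical rewrite: when the pointee type already appears as the template argument of cast<>, dyn_cast<>, or dyn_cast_or_null<>, spelling it again on the left-hand side is redundant, so the declaration becomes auto * (keeping any const). A minimal standalone sketch of the before/after, assuming only LLVM's Casting.h is on the include path; Shape and Circle are illustrative names, not types from this patch:

#include "llvm/Support/Casting.h"

struct Shape {
  enum ShapeKind { SK_Circle, SK_Square };
  ShapeKind Kind;
  explicit Shape(ShapeKind K) : Kind(K) {}
};

struct Circle : public Shape {
  double Radius;
  explicit Circle(double R) : Shape(SK_Circle), Radius(R) {}
  // LLVM-style RTTI: classof is what makes dyn_cast<Circle> work.
  static bool classof(const Shape *S) { return S->Kind == SK_Circle; }
};

double radiusOrZero(const Shape *S) {
  // Pre-patch spelling, with the pointee type written twice:
  //   const Circle *C = llvm::dyn_cast<Circle>(S);
  // Post-patch spelling: auto * deduces the identical type from the
  // template argument, while const and * stay visible at the declaration.
  if (const auto *C = llvm::dyn_cast<Circle>(S))
    return C->Radius;
  return 0.0;
}

The deduced type of C is the same under both spellings; the change is purely syntactic.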
@@ -301,7 +301,7 @@
     return MRI.getEncodingValue(MO.getReg());
   if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
-    const MCSymbolRefExpr *Expr = dyn_cast<MCSymbolRefExpr>(MO.getExpr());
+    const auto *Expr = dyn_cast<MCSymbolRefExpr>(MO.getExpr());
     MCFixupKind Kind;
     if (Expr && Expr->getSymbol().isExternal())
       Kind = FK_Data_4;
Index: lib/Target/AMDGPU/R600ISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/R600ISelLowering.cpp
+++ lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -689,7 +689,7 @@
 SDValue R600TargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                                SDValue Op,
                                                SelectionDAG &DAG) const {
-  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
+  auto *GSD = cast<GlobalAddressSDNode>(Op);
   if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
     return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
@@ -863,9 +863,9 @@
 }
 bool R600TargetLowering::isZero(SDValue Op) const {
-  if(ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
+  if(auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
     return Cst->isNullValue();
-  } else if(ConstantFPSDNode *CstFP = dyn_cast<ConstantFPSDNode>(Op)){
+  } else if(auto *CstFP = dyn_cast<ConstantFPSDNode>(Op)){
     return CstFP->isZero();
   } else {
     return false;
@@ -873,14 +873,14 @@
 }
 bool R600TargetLowering::isHWTrueValue(SDValue Op) const {
-  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
+  if (auto * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
     return CFP->isExactlyValue(1.0);
   }
   return isAllOnesConstant(Op);
 }
 bool R600TargetLowering::isHWFalseValue(SDValue Op) const {
-  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
+  if (auto * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
     return CFP->getValueAPF().isZero();
   }
   return isNullConstant(Op);
@@ -1133,7 +1133,7 @@
 }
 SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
-  StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
+  auto *StoreNode = cast<StoreSDNode>(Op);
   unsigned AS = StoreNode->getAddressSpace();
   SDValue Value = StoreNode->getValue();
   EVT ValueVT = Value.getValueType();
@@ -1296,7 +1296,7 @@
 SDValue R600TargetLowering::lowerPrivateExtLoad(SDValue Op,
                                                 SelectionDAG &DAG) const {
   SDLoc DL(Op);
-  LoadSDNode *Load = cast<LoadSDNode>(Op);
+  auto *Load = cast<LoadSDNode>(Op);
   ISD::LoadExtType ExtType = Load->getExtensionType();
   EVT MemVT = Load->getMemoryVT();
@@ -1350,7 +1350,7 @@
 }
 SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
-  LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
+  auto *LoadNode = cast<LoadSDNode>(Op);
   unsigned AS = LoadNode->getAddressSpace();
   EVT MemVT = LoadNode->getMemoryVT();
   ISD::LoadExtType ExtType = LoadNode->getExtensionType();
@@ -1504,7 +1504,7 @@
   MachineFunction &MF = DAG.getMachineFunction();
   const R600FrameLowering *TFL = getSubtarget()->getFrameLowering();
-  FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);
+  auto *FIN = cast<FrameIndexSDNode>(Op);
   unsigned FrameIndex = FIN->getIndex();
   unsigned IgnoredFrameReg;
@@ -1639,7 +1639,7 @@
       // vector is undef. Thus we can use it to reduce 128 bits reg usage,
      // break false dependencies and additionally make assembly easier to read.
RemapSwizzle[i] = 7; // SEL_MASK_WRITE - if (ConstantFPSDNode *C = dyn_cast(NewBldVec[i])) { + if (auto *C = dyn_cast(NewBldVec[i])) { if (C->isZero()) { RemapSwizzle[i] = 4; // SEL_0 NewBldVec[i] = DAG.getUNDEF(MVT::f32); @@ -1833,7 +1833,7 @@ case ISD::EXTRACT_VECTOR_ELT: { SDValue Arg = N->getOperand(0); if (Arg.getOpcode() == ISD::BUILD_VECTOR) { - if (ConstantSDNode *Const = dyn_cast(N->getOperand(1))) { + if (auto *Const = dyn_cast(N->getOperand(1))) { unsigned Element = Const->getZExtValue(); return Arg->getOperand(Element); } @@ -1842,7 +1842,7 @@ Arg.getOperand(0).getOpcode() == ISD::BUILD_VECTOR && (Arg.getOperand(0).getValueType().getVectorNumElements() == Arg.getValueType().getVectorNumElements())) { - if (ConstantSDNode *Const = dyn_cast(N->getOperand(1))) { + if (auto *Const = dyn_cast(N->getOperand(1))) { unsigned Element = Const->getZExtValue(); return DAG.getNode(ISD::BITCAST, DL, N->getVTList(), Arg->getOperand(0).getOperand(Element)); @@ -2006,17 +2006,17 @@ OtherSrcIdx--; OtherSelIdx--; } - if (RegisterSDNode *Reg = + if (auto *Reg = dyn_cast(ParentNode->getOperand(OtherSrcIdx))) { if (Reg->getReg() == AMDGPU::ALU_CONST) { - ConstantSDNode *Cst + auto *Cst = cast(ParentNode->getOperand(OtherSelIdx)); Consts.push_back(Cst->getZExtValue()); } } } - ConstantSDNode *Cst = cast(CstOffset); + auto *Cst = cast(CstOffset); Consts.push_back(Cst->getZExtValue()); if (!TII->fitsConstReadLimitations(Consts)) { return false; @@ -2039,7 +2039,7 @@ uint64_t ImmValue = 0; if (Src.getMachineOpcode() == AMDGPU::MOV_IMM_F32) { - ConstantFPSDNode *FPC = dyn_cast(Src.getOperand(0)); + auto *FPC = dyn_cast(Src.getOperand(0)); float FloatValue = FPC->getValueAPF().convertToFloat(); if (FloatValue == 0.0) { ImmReg = AMDGPU::ZERO; @@ -2051,7 +2051,7 @@ ImmValue = FPC->getValueAPF().bitcastToAPInt().getZExtValue(); } } else { - ConstantSDNode *C = dyn_cast(Src.getOperand(0)); + auto *C = dyn_cast(Src.getOperand(0)); uint64_t Value = C->getZExtValue(); if (Value == 0) { ImmReg = AMDGPU::ZERO; @@ -2068,7 +2068,7 @@ if (ImmReg == AMDGPU::ALU_LITERAL_X) { if (!Imm.getNode()) return false; - ConstantSDNode *C = dyn_cast(Imm); + auto *C = dyn_cast(Imm); assert(C); if (C->getZExtValue()) return false; Index: lib/Target/AMDGPU/SIAnnotateControlFlow.cpp =================================================================== --- lib/Target/AMDGPU/SIAnnotateControlFlow.cpp +++ lib/Target/AMDGPU/SIAnnotateControlFlow.cpp @@ -286,7 +286,7 @@ // of parent, particularly when we're dealing with a multi-level // break, but it should occur within a group of intrinsic calls // at the beginning of the block. 
- CallInst *OldEnd = dyn_cast(Parent->getFirstInsertionPt()); + auto *OldEnd = dyn_cast(Parent->getFirstInsertionPt()); while (OldEnd && OldEnd->getCalledFunction() != EndCf) OldEnd = dyn_cast(OldEnd->getNextNode()); if (OldEnd && OldEnd->getCalledFunction() == EndCf) { @@ -302,7 +302,7 @@ eraseIfUnused(Phi); return Ret; - } else if (Instruction *Inst = dyn_cast(Cond)) { + } else if (auto *Inst = dyn_cast(Cond)) { BasicBlock *Parent = Inst->getParent(); Instruction *Insert; if (L->contains(Inst)) { @@ -386,7 +386,7 @@ for (df_iterator I = df_begin(&F.getEntryBlock()), E = df_end(&F.getEntryBlock()); I != E; ++I) { - BranchInst *Term = dyn_cast((*I)->getTerminator()); + auto *Term = dyn_cast((*I)->getTerminator()); if (!Term || Term->isUnconditional()) { if (isTopOfStack(*I)) @@ -404,7 +404,7 @@ } if (isTopOfStack(*I)) { - PHINode *Phi = dyn_cast(Term->getCondition()); + auto *Phi = dyn_cast(Term->getCondition()); if (Phi && Phi->getParent() == *I && isElse(Phi)) { insertElse(Term); eraseIfUnused(Phi); Index: lib/Target/AMDGPU/SIISelLowering.cpp =================================================================== --- lib/Target/AMDGPU/SIISelLowering.cpp +++ lib/Target/AMDGPU/SIISelLowering.cpp @@ -611,9 +611,9 @@ } bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const { - const MemSDNode *MemNode = cast(N); + const auto *MemNode = cast(N); const Value *Ptr = MemNode->getMemOperand()->getValue(); - const Instruction *I = dyn_cast(Ptr); + const auto *I = dyn_cast(Ptr); return I && I->getMetadata("amdgpu.noclobber"); } @@ -628,7 +628,7 @@ } bool SITargetLowering::isMemOpUniform(const SDNode *N) const { - const MemSDNode *MemNode = cast(N); + const auto *MemNode = cast(N); const Value *Ptr = MemNode->getMemOperand()->getValue(); // UndefValue means this is a load of a kernel input. These are uniform. @@ -639,7 +639,7 @@ isa(Ptr) || isa(Ptr)) return true; - const Instruction *I = dyn_cast(Ptr); + const auto *I = dyn_cast(Ptr); return I && I->getMetadata("amdgpu.uniform"); } @@ -2114,7 +2114,7 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) const { SDLoc SL(Op); - const AddrSpaceCastSDNode *ASC = cast(Op); + const auto *ASC = cast(Op); SDValue Src = ASC->getOperand(0); @@ -2213,7 +2213,7 @@ SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op, SelectionDAG &DAG) const { - GlobalAddressSDNode *GSD = cast(Op); + auto *GSD = cast(Op); if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS && GSD->getAddressSpace() != AMDGPUAS::GLOBAL_ADDRESS) @@ -2564,7 +2564,7 @@ Op.getOperand(1), Op.getOperand(2)); case Intrinsic::amdgcn_div_scale: { // 3rd parameter required to be a constant. - const ConstantSDNode *Param = dyn_cast(Op.getOperand(3)); + const auto *Param = dyn_cast(Op.getOperand(3)); if (!Param) return DAG.getUNDEF(VT); @@ -2626,7 +2626,7 @@ switch (IntrID) { case Intrinsic::amdgcn_atomic_inc: case Intrinsic::amdgcn_atomic_dec: { - MemSDNode *M = cast(Op); + auto *M = cast(Op); unsigned Opc = (IntrID == Intrinsic::amdgcn_atomic_inc) ? 
AMDGPUISD::ATOMIC_INC : AMDGPUISD::ATOMIC_DEC; SDValue Ops[] = { @@ -2686,7 +2686,7 @@ } case AMDGPUIntrinsic::AMDGPU_kill: { SDValue Src = Op.getOperand(2); - if (const ConstantFPSDNode *K = dyn_cast(Src)) { + if (const auto *K = dyn_cast(Src)) { if (!K->isNegative()) return Chain; @@ -2698,11 +2698,11 @@ return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast); } case AMDGPUIntrinsic::SI_export: { - const ConstantSDNode *En = cast(Op.getOperand(2)); - const ConstantSDNode *VM = cast(Op.getOperand(3)); - const ConstantSDNode *Done = cast(Op.getOperand(4)); - const ConstantSDNode *Tgt = cast(Op.getOperand(5)); - const ConstantSDNode *Compr = cast(Op.getOperand(6)); + const auto *En = cast(Op.getOperand(2)); + const auto *VM = cast(Op.getOperand(3)); + const auto *Done = cast(Op.getOperand(4)); + const auto *Tgt = cast(Op.getOperand(5)); + const auto *Compr = cast(Op.getOperand(6)); const SDValue Ops[] = { Chain, @@ -2727,7 +2727,7 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); - LoadSDNode *Load = cast(Op); + auto *Load = cast(Op); ISD::LoadExtType ExtType = Load->getExtensionType(); EVT MemVT = Load->getMemoryVT(); @@ -2870,7 +2870,7 @@ EVT VT = Op.getValueType(); bool Unsafe = DAG.getTarget().Options.UnsafeFPMath; - if (const ConstantFPSDNode *CLHS = dyn_cast(LHS)) { + if (const auto *CLHS = dyn_cast(LHS)) { if ((Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals()))) { if (CLHS->isExactlyValue(1.0)) { @@ -3152,7 +3152,7 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); - StoreSDNode *Store = cast(Op); + auto *Store = cast(Op); EVT VT = Store->getMemoryVT(); if (VT == MVT::i1) { @@ -3237,7 +3237,7 @@ } SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const { - AtomicSDNode *AtomicNode = cast(Op); + auto *AtomicNode = cast(Op); assert(AtomicNode->isCompareAndSwap()); unsigned AS = AtomicNode->getAddressSpace(); @@ -3347,11 +3347,11 @@ if (N0.getOpcode() != ISD::ADD) return SDValue(); - const ConstantSDNode *CN1 = dyn_cast(N1); + const auto *CN1 = dyn_cast(N1); if (!CN1) return SDValue(); - const ConstantSDNode *CAdd = dyn_cast(N0.getOperand(1)); + const auto *CAdd = dyn_cast(N0.getOperand(1)); if (!CAdd) return SDValue(); @@ -3416,7 +3416,7 @@ if (VT == MVT::i64) { - const ConstantSDNode *CRHS = dyn_cast(RHS); + const auto *CRHS = dyn_cast(RHS); if (CRHS) { if (SDValue Split = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS)) @@ -3440,7 +3440,7 @@ return SDValue(); if (RCC == ISD::SETUNE) { - const ConstantFPSDNode *C1 = dyn_cast(RHS.getOperand(1)); + const auto *C1 = dyn_cast(RHS.getOperand(1)); if (!C1 || !C1->isInfinity() || C1->isNegative()) return SDValue(); @@ -3482,8 +3482,8 @@ if (Src != RHS.getOperand(0)) return SDValue(); - const ConstantSDNode *CLHS = dyn_cast(LHS.getOperand(1)); - const ConstantSDNode *CRHS = dyn_cast(RHS.getOperand(1)); + const auto *CLHS = dyn_cast(LHS.getOperand(1)); + const auto *CRHS = dyn_cast(RHS.getOperand(1)); if (!CLHS || !CRHS) return SDValue(); @@ -3529,7 +3529,7 @@ } } - const ConstantSDNode *CRHS = dyn_cast(N->getOperand(1)); + const auto *CRHS = dyn_cast(N->getOperand(1)); if (CRHS) { if (SDValue Split = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS)) @@ -3548,7 +3548,7 @@ SDValue LHS = N->getOperand(0); SDValue RHS = N->getOperand(1); - const ConstantSDNode *CRHS = dyn_cast(RHS); + const auto *CRHS = dyn_cast(RHS); if (CRHS) { if (SDValue Split = splitBinaryBitConstantOp(DCI, SDLoc(N), 
ISD::XOR, LHS, CRHS)) @@ -3564,7 +3564,7 @@ SDValue Mask = N->getOperand(1); // fp_class x, 0 -> false - if (const ConstantSDNode *CMask = dyn_cast(Mask)) { + if (const auto *CMask = dyn_cast(Mask)) { if (CMask->isNullValue()) return DAG.getConstant(0, SDLoc(N), MVT::i1); } @@ -3579,7 +3579,7 @@ SDValue SITargetLowering::performFCanonicalizeCombine( SDNode *N, DAGCombinerInfo &DCI) const { - ConstantFPSDNode *CFP = dyn_cast(N->getOperand(0)); + auto *CFP = dyn_cast(N->getOperand(0)); if (!CFP) return SDValue(); @@ -3636,11 +3636,11 @@ static SDValue performIntMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL, SDValue Op0, SDValue Op1, bool Signed) { - ConstantSDNode *K1 = dyn_cast(Op1); + auto *K1 = dyn_cast(Op1); if (!K1) return SDValue(); - ConstantSDNode *K0 = dyn_cast(Op0.getOperand(1)); + auto *K0 = dyn_cast(Op0.getOperand(1)); if (!K0) return SDValue(); @@ -3681,11 +3681,11 @@ static SDValue performFPMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL, SDValue Op0, SDValue Op1) { - ConstantFPSDNode *K1 = dyn_cast(Op1); + auto *K1 = dyn_cast(Op1); if (!K1) return SDValue(); - ConstantFPSDNode *K0 = dyn_cast(Op0.getOperand(1)); + auto *K0 = dyn_cast(Op0.getOperand(1)); if (!K0) return SDValue(); @@ -3783,7 +3783,7 @@ // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity)) ISD::CondCode CC = cast(N->getOperand(2))->get(); if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) { - const ConstantFPSDNode *CRHS = dyn_cast(RHS); + const auto *CRHS = dyn_cast(RHS); if (!CRHS) return SDValue(); @@ -3840,7 +3840,7 @@ // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x - if (const ConstantSDNode *C = + if (const auto *C = dyn_cast(Srl.getOperand(1))) { Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)), EVT(MVT::i32)); @@ -3972,7 +3972,7 @@ if (DCI.isBeforeLegalize()) break; - MemSDNode *MemNode = cast(N); + auto *MemNode = cast(N); SDValue Ptr = MemNode->getBasePtr(); // TODO: We could also do this for multiplies. 
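Two different casts are being rewritten throughout the AMDGPU hunks, and they keep their distinct contracts under auto: cast<T> asserts that the operand is a T and is used where the kind is already known (for example after matching an intrinsic ID or opcode), while dyn_cast<T> returns nullptr on mismatch and therefore lives inside a condition. A hedged sketch of the two contracts, again with a made-up hierarchy rather than real SelectionDAG types:

#include "llvm/Support/Casting.h"

struct Node {
  enum NodeKind { NK_Leaf, NK_Pair };
  NodeKind Kind;
  explicit Node(NodeKind K) : Kind(K) {}
};

struct Pair : public Node {
  Node *Lhs, *Rhs;
  Pair(Node *L, Node *R) : Node(NK_Pair), Lhs(L), Rhs(R) {}
  static bool classof(const Node *N) { return N->Kind == NK_Pair; }
};

Node *leftOrSelf(Node *N) {
  // dyn_cast<> is a query: it yields nullptr when N is not a Pair, so the
  // result must be tested, which is why it appears inside if-conditions.
  if (auto *P = llvm::dyn_cast<Pair>(N))
    return P->Lhs;
  // cast<> would instead assert that N is a Pair (in +Asserts builds);
  // the patch uses it only where the kind is already guaranteed.
  return N;
}

Using cast<> where dyn_cast<> is needed turns a recoverable mismatch into an assertion failure, which is why the conversion never swaps one for the other.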
Index: lib/Target/AMDGPU/SIInstrInfo.cpp
===================================================================
--- lib/Target/AMDGPU/SIInstrInfo.cpp
+++ lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -143,9 +143,9 @@
     if (Load0->getOperand(0) != Load1->getOperand(0))
       return false;
-    const ConstantSDNode *Load0Offset =
+    const auto *Load0Offset =
         dyn_cast<ConstantSDNode>(Load0->getOperand(1));
-    const ConstantSDNode *Load1Offset =
+    const auto *Load1Offset =
         dyn_cast<ConstantSDNode>(Load1->getOperand(1));
     if (!Load0Offset || !Load1Offset)
Index: lib/Target/AMDGPU/SITypeRewriter.cpp
===================================================================
--- lib/Target/AMDGPU/SITypeRewriter.cpp
+++ lib/Target/AMDGPU/SITypeRewriter.cpp
@@ -113,7 +113,7 @@
                                           Type::getInt32Ty(I.getContext())){
       Type *ElementTy = Arg->getType()->getVectorElementType();
       std::string TypeName = "i32";
-      InsertElementInst *Def = cast<InsertElementInst>(Arg);
+      auto *Def = cast<InsertElementInst>(Arg);
       Args.push_back(Def->getOperand(1));
       Types.push_back(ElementTy);
       std::string VecTypeName = "v1" + TypeName;
@@ -143,7 +143,7 @@
     return;
   }
-  if (BitCastInst *Op = dyn_cast<BitCastInst>(I.getOperand(0))) {
+  if (auto *Op = dyn_cast<BitCastInst>(I.getOperand(0))) {
     if (Op->getSrcTy() == v4i32) {
       I.replaceAllUsesWith(Op->getOperand(0));
       I.eraseFromParent();
Index: lib/Target/ARM/ARMAsmPrinter.cpp
===================================================================
--- lib/Target/ARM/ARMAsmPrinter.cpp
+++ lib/Target/ARM/ARMAsmPrinter.cpp
@@ -84,7 +84,7 @@
   uint64_t Size = getDataLayout().getTypeAllocSize(CV->getType());
   assert(Size && "C++ constructor pointer had zero size!");
-  const GlobalValue *GV = dyn_cast<GlobalValue>(CV->stripPointerCasts());
+  const auto *GV = dyn_cast<GlobalValue>(CV->stripPointerCasts());
   assert(GV && "C++ constructor pointer was not a GlobalValue!");
   const MCExpr *E = MCSymbolRefExpr::create(GetARMGVSymbol(GV,
Index: lib/Target/ARM/ARMBaseInstrInfo.cpp
===================================================================
--- lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -3825,10 +3825,10 @@
   }
   const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
-  const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode);
+  const auto *DefMN = dyn_cast<MachineSDNode>(DefNode);
   unsigned DefAlign = !DefMN->memoperands_empty()
                           ? (*DefMN->memoperands_begin())->getAlignment() : 0;
-  const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode);
+  const auto *UseMN = dyn_cast<MachineSDNode>(UseNode);
   unsigned UseAlign = !UseMN->memoperands_empty() ?
(*UseMN->memoperands_begin())->getAlignment() : 0; int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign, @@ -4161,7 +4161,7 @@ MachineBasicBlock &MBB = *MI->getParent(); DebugLoc DL = MI->getDebugLoc(); unsigned Reg = MI->getOperand(0).getReg(); - const GlobalValue *GV = + const auto *GV = cast((*MI->memoperands_begin())->getValue()); MachineInstrBuilder MIB; Index: lib/Target/ARM/ARMConstantPoolValue.cpp =================================================================== --- lib/Target/ARM/ARMConstantPoolValue.cpp +++ lib/Target/ARM/ARMConstantPoolValue.cpp @@ -187,7 +187,7 @@ } bool ARMConstantPoolConstant::hasSameValue(ARMConstantPoolValue *ACPV) { - const ARMConstantPoolConstant *ACPC = dyn_cast(ACPV); + const auto *ACPC = dyn_cast(ACPV); return ACPC && ACPC->CVal == CVal && ARMConstantPoolValue::hasSameValue(ACPV); } @@ -225,7 +225,7 @@ } bool ARMConstantPoolSymbol::hasSameValue(ARMConstantPoolValue *ACPV) { - const ARMConstantPoolSymbol *ACPS = dyn_cast(ACPV); + const auto *ACPS = dyn_cast(ACPV); return ACPS && ACPS->S == S && ARMConstantPoolValue::hasSameValue(ACPV); } @@ -265,7 +265,7 @@ } bool ARMConstantPoolMBB::hasSameValue(ARMConstantPoolValue *ACPV) { - const ARMConstantPoolMBB *ACPMBB = dyn_cast(ACPV); + const auto *ACPMBB = dyn_cast(ACPV); return ACPMBB && ACPMBB->MBB == MBB && ARMConstantPoolValue::hasSameValue(ACPV); } Index: lib/Target/ARM/ARMFastISel.cpp =================================================================== --- lib/Target/ARM/ARMFastISel.cpp +++ lib/Target/ARM/ARMFastISel.cpp @@ -440,7 +440,7 @@ // If we can do this in a single instruction without a constant pool entry // do so now. - const ConstantInt *CI = cast(C); + const auto *CI = cast(C); if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) { unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16; const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass : @@ -521,7 +521,7 @@ unsigned DestReg = createResultReg(RC); // FastISel TLS support on non-MachO is broken, punt to SelectionDAG. - const GlobalVariable *GVar = dyn_cast(GV); + const auto *GVar = dyn_cast(GV); bool IsThreadLocal = GVar && GVar->isThreadLocal(); if (!Subtarget->isTargetMachO() && IsThreadLocal) return 0; @@ -619,9 +619,9 @@ if (!CEVT.isSimple()) return 0; MVT VT = CEVT.getSimpleVT(); - if (const ConstantFP *CFP = dyn_cast(C)) + if (const auto *CFP = dyn_cast(C)) return ARMMaterializeFP(CFP, VT); - else if (const GlobalValue *GV = dyn_cast(C)) + else if (const auto *GV = dyn_cast(C)) return ARMMaterializeGV(GV, VT); else if (isa(C)) return ARMMaterializeInt(C, VT); @@ -687,7 +687,7 @@ // Some boilerplate from the X86 FastISel. const User *U = nullptr; unsigned Opcode = Instruction::UserOp1; - if (const Instruction *I = dyn_cast(Obj)) { + if (const auto *I = dyn_cast(Obj)) { // Don't walk into other basic blocks unless the object is an alloca from // another block, otherwise it may not have a virtual register assigned. if (FuncInfo.StaticAllocaMap.count(static_cast(Obj)) || @@ -695,12 +695,12 @@ Opcode = I->getOpcode(); U = I; } - } else if (const ConstantExpr *C = dyn_cast(Obj)) { + } else if (const auto *C = dyn_cast(Obj)) { Opcode = C->getOpcode(); U = C; } - if (PointerType *Ty = dyn_cast(Obj->getType())) + if (auto *Ty = dyn_cast(Obj->getType())) if (Ty->getAddressSpace() > 255) // Fast instruction selection doesn't support the special // address spaces. 
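One detail the ARM hunks make visible: a declaration such as const GlobalVariable *GVar becomes const auto *GVar, never bare auto. auto alone would deduce the same pointer type, but writing out the * and the const keeps the pointer-ness and constness readable at the declaration site. A self-contained illustration of the deduction rules (standard C++ only, no LLVM dependency):

#include <type_traits>

void deductionDemo() {
  const int V = 42;
  const int *P = &V;
  auto A = P;        // deduces const int *, but "pointer" is hidden in the name
  auto *B = P;       // same type; the declaration itself says "pointer"
  const auto *C = P; // constness spelled out too, the form this patch prefers
  static_assert(std::is_same<decltype(A), const int *>::value, "A is const int *");
  static_assert(std::is_same<decltype(B), const int *>::value, "B is const int *");
  static_assert(std::is_same<decltype(C), const int *>::value, "C is const int *");
  (void)A; (void)B; (void)C;
}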
@@ -740,14 +740,14 @@ } else { uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType()); for (;;) { - if (const ConstantInt *CI = dyn_cast(Op)) { + if (const auto *CI = dyn_cast(Op)) { // Constant-offset addressing. TmpOffset += CI->getSExtValue() * S; break; } if (canFoldAddIntoGEP(U, Op)) { // A compatible add with a constant operand. Fold the constant. - ConstantInt *CI = + auto *CI = cast(cast(Op)->getOperand(1)); TmpOffset += CI->getSExtValue() * S; // Iterate on the other operand. @@ -771,7 +771,7 @@ break; } case Instruction::Alloca: { - const AllocaInst *AI = cast(Obj); + const auto *AI = cast(Obj); DenseMap::iterator SI = FuncInfo.StaticAllocaMap.find(AI); if (SI != FuncInfo.StaticAllocaMap.end()) { @@ -997,12 +997,12 @@ if (TLI.supportSwiftError()) { // Swifterror values can come from either a function parameter with // swifterror attribute or an alloca with swifterror attribute. - if (const Argument *Arg = dyn_cast(SV)) { + if (const auto *Arg = dyn_cast(SV)) { if (Arg->hasSwiftErrorAttr()) return false; } - if (const AllocaInst *Alloca = dyn_cast(SV)) { + if (const auto *Alloca = dyn_cast(SV)) { if (Alloca->isSwiftError()) return false; } @@ -1128,12 +1128,12 @@ if (TLI.supportSwiftError()) { // Swifterror values can come from either a function parameter with // swifterror attribute or an alloca with swifterror attribute. - if (const Argument *Arg = dyn_cast(PtrV)) { + if (const auto *Arg = dyn_cast(PtrV)) { if (Arg->hasSwiftErrorAttr()) return false; } - if (const AllocaInst *Alloca = dyn_cast(PtrV)) { + if (const auto *Alloca = dyn_cast(PtrV)) { if (Alloca->isSwiftError()) return false; } @@ -1206,7 +1206,7 @@ } bool ARMFastISel::SelectBranch(const Instruction *I) { - const BranchInst *BI = cast(I); + const auto *BI = cast(I); MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)]; MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)]; @@ -1214,7 +1214,7 @@ // If we can, avoid recomputing the compare - redoing it could lead to wonky // behavior. - if (const CmpInst *CI = dyn_cast(BI->getCondition())) { + if (const auto *CI = dyn_cast(BI->getCondition())) { if (CI->hasOneUse() && (CI->getParent() == I->getParent())) { // Get the compare predicate. @@ -1240,7 +1240,7 @@ finishCondBranch(BI->getParent(), TBB, FBB); return true; } - } else if (TruncInst *TI = dyn_cast(BI->getCondition())) { + } else if (auto *TI = dyn_cast(BI->getCondition())) { MVT SourceVT; if (TI->hasOneUse() && TI->getParent() == I->getParent() && (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) { @@ -1264,7 +1264,7 @@ finishCondBranch(BI->getParent(), TBB, FBB); return true; } - } else if (const ConstantInt *CI = + } else if (const auto *CI = dyn_cast(BI->getCondition())) { uint64_t Imm = CI->getZExtValue(); MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB; @@ -1310,7 +1310,7 @@ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc)).addReg(AddrReg)); - const IndirectBrInst *IB = cast(I); + const auto *IB = cast(I); for (const BasicBlock *SuccBB : IB->successors()) FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[SuccBB]); @@ -1335,7 +1335,7 @@ bool isNegativeImm = false; // FIXME: At -O0 we don't have anything that canonicalizes operand order. // Thus, Src1Value may be a ConstantInt, but we're missing it. 
- if (const ConstantInt *ConstInt = dyn_cast(Src2Value)) { + if (const auto *ConstInt = dyn_cast(Src2Value)) { if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 || SrcVT == MVT::i1) { const APInt &CIVal = ConstInt->getValue(); @@ -1350,7 +1350,7 @@ UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) : (ARM_AM::getSOImmVal(Imm) != -1); } - } else if (const ConstantFP *ConstFP = dyn_cast(Src2Value)) { + } else if (const auto *ConstFP = dyn_cast(Src2Value)) { if (SrcVT == MVT::f32 || SrcVT == MVT::f64) if (ConstFP->isZero() && !ConstFP->isNegative()) UseImm = true; @@ -1435,7 +1435,7 @@ } bool ARMFastISel::SelectCmp(const Instruction *I) { - const CmpInst *CI = cast(I); + const auto *CI = cast(I); // Get the compare predicate. ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate()); @@ -1595,7 +1595,7 @@ int Imm = 0; bool UseImm = false; bool isNegativeImm = false; - if (const ConstantInt *ConstInt = dyn_cast(I->getOperand(2))) { + if (const auto *ConstInt = dyn_cast(I->getOperand(2))) { assert (VT == MVT::i32 && "Expecting an i32."); Imm = (int)ConstInt->getValue().getZExtValue(); if (Imm < 0) { @@ -2049,7 +2049,7 @@ } bool ARMFastISel::SelectRet(const Instruction *I) { - const ReturnInst *Ret = cast(I); + const auto *Ret = cast(I); const Function &F = *I->getParent()->getParent(); if (!FuncInfo.CanLowerReturn) @@ -2256,7 +2256,7 @@ bool ARMFastISel::SelectCall(const Instruction *I, const char *IntrMemName = nullptr) { - const CallInst *CI = cast(I); + const auto *CI = cast(I); const Value *Callee = CI->getCalledValue(); // Can't handle inline asm. @@ -2353,7 +2353,7 @@ return false; bool UseReg = false; - const GlobalValue *GV = dyn_cast(Callee); + const auto *GV = dyn_cast(Callee); if (!GV || Subtarget->genLongCalls()) UseReg = true; unsigned CalleeReg = 0; @@ -2483,7 +2483,7 @@ } case Intrinsic::memcpy: case Intrinsic::memmove: { - const MemTransferInst &MTI = cast(I); + const auto &MTI = cast(I); // Don't handle volatile. if (MTI.isVolatile()) return false; @@ -2516,7 +2516,7 @@ return SelectCall(&I, IntrMemName); } case Intrinsic::memset: { - const MemSetInst &MSI = cast(I); + const auto &MSI = cast(I); // Don't handle volatile. if (MSI.isVolatile()) return false; @@ -2737,7 +2737,7 @@ unsigned Opc = ARM::MOVsr; unsigned ShiftImm; Value *Src2Value = I->getOperand(1); - if (const ConstantInt *CI = dyn_cast(Src2Value)) { + if (const auto *CI = dyn_cast(Src2Value)) { ShiftImm = CI->getZExtValue(); // Fall back to selection DAG isel if the shift amount @@ -2825,7 +2825,7 @@ case Instruction::URem: return SelectRem(I, /*isSigned*/ false); case Instruction::Call: - if (const IntrinsicInst *II = dyn_cast(I)) + if (const auto *II = dyn_cast(I)) return SelectIntrinsicCall(*II); return SelectCall(I); case Instruction::Select: Index: lib/Target/ARM/ARMISelDAGToDAG.cpp =================================================================== --- lib/Target/ARM/ARMISelDAGToDAG.cpp +++ lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -125,7 +125,7 @@ } bool SelectCMOVPred(SDValue N, SDValue &Pred, SDValue &Reg) { - const ConstantSDNode *CN = cast(N); + const auto *CN = cast(N); Pred = CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(N), MVT::i32); Reg = CurDAG->getRegister(ARM::CPSR, MVT::i32); return true; @@ -321,7 +321,7 @@ assert(Scale > 0 && "Invalid scale!"); // Check that this is a constant. - const ConstantSDNode *C = dyn_cast(Node); + const auto *C = dyn_cast(Node); if (!C) return false; @@ -503,7 +503,7 @@ // will make other uses incorrect, so don't. 
if (!N.hasOneUse()) return false; // Check if the multiply is by a constant - ConstantSDNode *MulConst = dyn_cast(N.getOperand(1)); + auto *MulConst = dyn_cast(N.getOperand(1)); if (!MulConst) return false; // If the constant is used in more than one place then modifying it will mean // we need to materialize two constants instead of one, which is a bad idea. @@ -562,7 +562,7 @@ BaseReg = N.getOperand(0); unsigned ShImmVal = 0; - ConstantSDNode *RHS = dyn_cast(N.getOperand(1)); + auto *RHS = dyn_cast(N.getOperand(1)); if (!RHS) return false; ShImmVal = RHS->getZExtValue() & 31; Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal), @@ -586,7 +586,7 @@ BaseReg = N.getOperand(0); unsigned ShImmVal = 0; - ConstantSDNode *RHS = dyn_cast(N.getOperand(1)); + auto *RHS = dyn_cast(N.getOperand(1)); if (RHS) return false; ShReg = N.getOperand(1); @@ -626,7 +626,7 @@ return true; } - if (ConstantSDNode *RHS = dyn_cast(N.getOperand(1))) { + if (auto *RHS = dyn_cast(N.getOperand(1))) { int RHSC = (int)RHS->getSExtValue(); if (N.getOpcode() == ISD::SUB) RHSC = -RHSC; @@ -655,7 +655,7 @@ SDValue &Opc) { if (N.getOpcode() == ISD::MUL && ((!Subtarget->isLikeA9() && !Subtarget->isSwift()) || N.hasOneUse())) { - if (ConstantSDNode *RHS = dyn_cast(N.getOperand(1))) { + if (auto *RHS = dyn_cast(N.getOperand(1))) { // X * [3,5,9] -> X + X * [2,4,8] etc. int RHSC = (int)RHS->getZExtValue(); if (RHSC & 1) { @@ -702,7 +702,7 @@ if (ShOpcVal != ARM_AM::no_shift) { // Check to see if the RHS of the shift is a constant, if not, we can't fold // it. - if (ConstantSDNode *Sh = + if (auto *Sh = dyn_cast(N.getOperand(1).getOperand(1))) { ShAmt = Sh->getZExtValue(); if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt)) @@ -724,7 +724,7 @@ if (ShOpcVal != ARM_AM::no_shift) { // Check to see if the RHS of the shift is a constant, if not, we can't // fold it. - if (ConstantSDNode *Sh = + if (auto *Sh = dyn_cast(N.getOperand(0).getOperand(1))) { ShAmt = Sh->getZExtValue(); if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) { @@ -766,7 +766,7 @@ SDValue &Opc) { if (N.getOpcode() == ISD::MUL && (!(Subtarget->isLikeA9() || Subtarget->isSwift()) || N.hasOneUse())) { - if (ConstantSDNode *RHS = dyn_cast(N.getOperand(1))) { + if (auto *RHS = dyn_cast(N.getOperand(1))) { // X * [3,5,9] -> X + X * [2,4,8] etc. int RHSC = (int)RHS->getZExtValue(); if (RHSC & 1) { @@ -856,7 +856,7 @@ if (ShOpcVal != ARM_AM::no_shift) { // Check to see if the RHS of the shift is a constant, if not, we can't fold // it. - if (ConstantSDNode *Sh = + if (auto *Sh = dyn_cast(N.getOperand(1).getOperand(1))) { ShAmt = Sh->getZExtValue(); if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt)) @@ -878,7 +878,7 @@ if (ShOpcVal != ARM_AM::no_shift) { // Check to see if the RHS of the shift is a constant, if not, we can't // fold it. - if (ConstantSDNode *Sh = + if (auto *Sh = dyn_cast(N.getOperand(0).getOperand(1))) { ShAmt = Sh->getZExtValue(); if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) { @@ -917,7 +917,7 @@ if (ShOpcVal != ARM_AM::no_shift) { // Check to see if the RHS of the shift is a constant, if not, we can't fold // it. 
- if (ConstantSDNode *Sh = dyn_cast(N.getOperand(1))) { + if (auto *Sh = dyn_cast(N.getOperand(1))) { ShAmt = Sh->getZExtValue(); if (isShifterOpProfitable(N, ShOpcVal, ShAmt)) Offset = N.getOperand(0); @@ -1108,7 +1108,7 @@ unsigned Alignment = 0; - MemSDNode *MemN = cast(Parent); + auto *MemN = cast(Parent); if (isa(MemN) || ((MemN->getOpcode() == ARMISD::VST1_UPD || @@ -1133,12 +1133,12 @@ bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset) { - LSBaseSDNode *LdSt = cast(Op); + auto *LdSt = cast(Op); ISD::MemIndexedMode AM = LdSt->getAddressingMode(); if (AM != ISD::POST_INC) return false; Offset = N; - if (ConstantSDNode *NC = dyn_cast(N)) { + if (auto *NC = dyn_cast(N)) { if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits()) Offset = CurDAG->getRegister(0, MVT::i32); } @@ -1166,7 +1166,7 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset){ if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) { - ConstantSDNode *NC = dyn_cast(N); + auto *NC = dyn_cast(N); if (!NC || !NC->isNullValue()) return false; @@ -1247,7 +1247,7 @@ if (!CurDAG->isBaseWithConstantOffset(N)) return false; - RegisterSDNode *LHSR = dyn_cast(N.getOperand(0)); + auto *LHSR = dyn_cast(N.getOperand(0)); if (N.getOperand(0).getOpcode() == ISD::FrameIndex || (LHSR && LHSR->getReg() == ARM::SP)) { // If the RHS is + imm8 * scale, fold into addr mode. @@ -1307,7 +1307,7 @@ return true; } - if (ConstantSDNode *RHS = dyn_cast(N.getOperand(1))) { + if (auto *RHS = dyn_cast(N.getOperand(1))) { if (SelectT2AddrModeImm8(N, Base, OffImm)) // Let t2LDRi8 handle (R - imm8). return false; @@ -1341,7 +1341,7 @@ !CurDAG->isBaseWithConstantOffset(N)) return false; - if (ConstantSDNode *RHS = dyn_cast(N.getOperand(1))) { + if (auto *RHS = dyn_cast(N.getOperand(1))) { int RHSC = (int)RHS->getSExtValue(); if (N.getOpcode() == ISD::SUB) RHSC = -RHSC; @@ -1386,7 +1386,7 @@ return false; // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8. - if (ConstantSDNode *RHS = dyn_cast(N.getOperand(1))) { + if (auto *RHS = dyn_cast(N.getOperand(1))) { int RHSC = (int)RHS->getZExtValue(); if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned) return false; @@ -1410,7 +1410,7 @@ if (ShOpcVal == ARM_AM::lsl) { // Check to see if the RHS of the shift is a constant, if not, we can't fold // it. 
- if (ConstantSDNode *Sh = dyn_cast(OffReg.getOperand(1))) { + if (auto *Sh = dyn_cast(OffReg.getOperand(1))) { ShAmt = Sh->getZExtValue(); if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt)) OffReg = OffReg.getOperand(0); @@ -1446,7 +1446,7 @@ if (N.getOpcode() != ISD::ADD || !CurDAG->isBaseWithConstantOffset(N)) return true; - ConstantSDNode *RHS = dyn_cast(N.getOperand(1)); + auto *RHS = dyn_cast(N.getOperand(1)); if (!RHS) return true; @@ -1479,7 +1479,7 @@ } bool ARMDAGToDAGISel::tryARMIndexedLoad(SDNode *N) { - LoadSDNode *LD = cast(N); + auto *LD = cast(N); ISD::MemIndexedMode AM = LD->getAddressingMode(); if (AM == ISD::UNINDEXED) return false; @@ -1558,7 +1558,7 @@ } bool ARMDAGToDAGISel::tryT1IndexedLoad(SDNode *N) { - LoadSDNode *LD = cast(N); + auto *LD = cast(N); EVT LoadedVT = LD->getMemoryVT(); ISD::MemIndexedMode AM = LD->getAddressingMode(); if (AM != ISD::POST_INC || LD->getExtensionType() != ISD::NON_EXTLOAD || @@ -1585,7 +1585,7 @@ } bool ARMDAGToDAGISel::tryT2IndexedLoad(SDNode *N) { - LoadSDNode *LD = cast(N); + auto *LD = cast(N); ISD::MemIndexedMode AM = LD->getAddressingMode(); if (AM == ISD::UNINDEXED) return false; @@ -2521,7 +2521,7 @@ SDValue ADDSrc1 = XORSrc0.getOperand(1); SDValue SRASrc0 = XORSrc1.getOperand(0); SDValue SRASrc1 = XORSrc1.getOperand(1); - ConstantSDNode *SRAConstant = dyn_cast(SRASrc1); + auto *SRAConstant = dyn_cast(SRASrc1); EVT XType = SRASrc0.getValueType(); unsigned Size = XType.getSizeInBits() - 1; @@ -2554,7 +2554,7 @@ if (SignExt.getOpcode() != ISD::SRA) return false; - ConstantSDNode *SRASrc1 = dyn_cast(SignExt.getOperand(1)); + auto *SRASrc1 = dyn_cast(SignExt.getOperand(1)); if (!SRASrc1 || SRASrc1->getZExtValue() != 16) return false; @@ -2563,7 +2563,7 @@ // The sign extend operand for SM*WB could be generated by a shl and ashr. if (Op0.getOpcode() == ISD::SHL) { SDValue SHL = Op0; - ConstantSDNode *SHLSrc1 = dyn_cast(SHL.getOperand(1)); + auto *SHLSrc1 = dyn_cast(SHL.getOperand(1)); if (!SHLSrc1 || SHLSrc1->getZExtValue() != 16) return false; @@ -2593,8 +2593,8 @@ return false; } - ConstantSDNode *SRLSrc1 = dyn_cast(SRL.getOperand(1)); - ConstantSDNode *SHLSrc1 = dyn_cast(SHL.getOperand(1)); + auto *SRLSrc1 = dyn_cast(SRL.getOperand(1)); + auto *SHLSrc1 = dyn_cast(SHL.getOperand(1)); if (!SRLSrc1 || !SHLSrc1 || SRLSrc1->getZExtValue() != 16 || SHLSrc1->getZExtValue() != 16) return false; @@ -2810,7 +2810,7 @@ case ISD::MUL: if (Subtarget->isThumb1Only()) break; - if (ConstantSDNode *C = dyn_cast(N->getOperand(1))) { + if (auto *C = dyn_cast(N->getOperand(1))) { unsigned RHSV = C->getZExtValue(); if (!RHSV) break; if (isPowerOf2_32(RHSV-1)) { // 2^n+1? @@ -2919,7 +2919,7 @@ break; if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) { SDValue N2 = N0.getOperand(1); - ConstantSDNode *N2C = dyn_cast(N2); + auto *N2C = dyn_cast(N2); if (!N2C) break; unsigned N1CVal = N1C->getZExtValue(); @@ -3001,8 +3001,8 @@ if (Adde.getOperand(2).getNode() == Addc.getNode()) { - ConstantSDNode *Op0 = dyn_cast(Adde.getOperand(0)); - ConstantSDNode *Op1 = dyn_cast(Adde.getOperand(1)); + auto *Op0 = dyn_cast(Adde.getOperand(0)); + auto *Op1 = dyn_cast(Adde.getOperand(1)); if (Op0 && Op1 && Op0->getZExtValue() == 0 && Op1->getZExtValue() == 0) { @@ -4078,8 +4078,8 @@ // using the supplied metadata string to select the instruction node to use // and the registers/masks to construct as operands for the node. 
bool ARMDAGToDAGISel::tryReadRegister(SDNode *N){ - const MDNodeSDNode *MD = dyn_cast(N->getOperand(1)); - const MDString *RegString = dyn_cast(MD->getMD()->getOperand(0)); + const auto *MD = dyn_cast(N->getOperand(1)); + const auto *RegString = dyn_cast(MD->getMD()->getOperand(0)); bool IsThumb2 = Subtarget->isThumb2(); SDLoc DL(N); @@ -4199,8 +4199,8 @@ // using the supplied metadata string to select the instruction node to use // and the registers/masks to use in the nodes bool ARMDAGToDAGISel::tryWriteRegister(SDNode *N){ - const MDNodeSDNode *MD = dyn_cast(N->getOperand(1)); - const MDString *RegString = dyn_cast(MD->getMD()->getOperand(0)); + const auto *MD = dyn_cast(N->getOperand(1)); + const auto *RegString = dyn_cast(MD->getMD()->getOperand(0)); bool IsThumb2 = Subtarget->isThumb2(); SDLoc DL(N); @@ -4331,7 +4331,7 @@ if (i < InlineAsm::Op_FirstOperand) continue; - if (ConstantSDNode *C = dyn_cast(N->getOperand(i))) { + if (auto *C = dyn_cast(N->getOperand(i))) { Flag = C->getZExtValue(); Kind = InlineAsm::getKind(Flag); } Index: lib/Target/ARM/ARMISelLowering.cpp =================================================================== --- lib/Target/ARM/ARMISelLowering.cpp +++ lib/Target/ARM/ARMISelLowering.cpp @@ -2005,7 +2005,7 @@ const TargetMachine &TM = getTargetMachine(); const Module *Mod = MF.getFunction()->getParent(); const GlobalValue *GV = nullptr; - if (GlobalAddressSDNode *G = dyn_cast(Callee)) + if (auto *G = dyn_cast(Callee)) GV = G->getGlobal(); bool isStub = !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO(); @@ -2033,7 +2033,7 @@ Callee = DAG.getLoad( PtrVt, dl, DAG.getEntryNode(), CPAddr, MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); - } else if (ExternalSymbolSDNode *S=dyn_cast(Callee)) { + } else if (auto *S=dyn_cast(Callee)) { const char *Sym = S->getSymbol(); // Create a constant pool entry for the callee address @@ -2094,7 +2094,7 @@ Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0); } } - } else if (ExternalSymbolSDNode *S = dyn_cast(Callee)) { + } else if (auto *S = dyn_cast(Callee)) { isDirect = true; // tBX takes a register source operand. const char *Sym = S->getSymbol(); @@ -2270,7 +2270,7 @@ } else { return false; } - } else if (LoadSDNode *Ld = dyn_cast(Arg)) { + } else if (auto *Ld = dyn_cast(Arg)) { if (Flags.isByVal()) // ByVal argument is passed in as a pointer but it's now being // dereferenced. e.g. @@ -2279,7 +2279,7 @@ // } return false; SDValue Ptr = Ld->getBasePtr(); - FrameIndexSDNode *FINode = dyn_cast(Ptr); + auto *FINode = dyn_cast(Ptr); if (!FINode) return false; FI = FINode->getIndex(); @@ -2332,7 +2332,7 @@ // next instruction. The behaviour of branch instructions in this // situation (as used for tail calls) is implementation-defined, so we // cannot rely on the linker replacing the tail call with a return. 
- if (GlobalAddressSDNode *G = dyn_cast(Callee)) { + if (auto *G = dyn_cast(Callee)) { const GlobalValue *GV = G->getGlobal(); const Triple &TT = getTargetMachine().getTargetTriple(); if (GV->hasExternalWeakLinkage() && @@ -2701,7 +2701,7 @@ EVT PtrVT = Op.getValueType(); // FIXME there is no actual debug info here SDLoc dl(Op); - ConstantPoolSDNode *CP = cast(Op); + auto *CP = cast(Op); SDValue Res; if (CP->isMachineConstantPoolEntry()) Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, @@ -2973,7 +2973,7 @@ // TODO: implement the "local dynamic" model assert(Subtarget->isTargetELF() && "Only ELF implemented here"); - GlobalAddressSDNode *GA = cast(Op); + auto *GA = cast(Op); if (DAG.getTarget().Options.EmulatedTLS) return LowerToTLSEmulatedModel(GA, DAG); @@ -3144,7 +3144,7 @@ SDLoc dl(Op); const GlobalValue *GV = cast(Op)->getGlobal(); const TargetMachine &TM = getTargetMachine(); - if (const GlobalAlias *GA = dyn_cast(GV)) + if (const auto *GA = dyn_cast(GV)) GV = GA->getBaseObject(); bool IsRO = (isa(GV) && cast(GV)->isConstant()) || @@ -3385,7 +3385,7 @@ DAG.getConstant(0, dl, MVT::i32)); } - ConstantSDNode *OrdN = cast(Op.getOperand(1)); + auto *OrdN = cast(Op.getOperand(1)); AtomicOrdering Ord = static_cast(OrdN->getZExtValue()); ARM_MB::MemBOpt Domain = ARM_MB::ISH; if (Subtarget->isMClass()) { @@ -3765,14 +3765,14 @@ /// isFloatingPointZero - Return true if this is +0.0. static bool isFloatingPointZero(SDValue Op) { - if (ConstantFPSDNode *CFP = dyn_cast(Op)) + if (auto *CFP = dyn_cast(Op)) return CFP->getValueAPF().isPosZero(); else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) { // Maybe this has already been legalized into the constant pool? if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) { SDValue WrapperOp = Op.getOperand(1).getOperand(0); - if (ConstantPoolSDNode *CP = dyn_cast(WrapperOp)) - if (const ConstantFP *CFP = dyn_cast(CP->getConstVal())) + if (auto *CP = dyn_cast(WrapperOp)) + if (const auto *CFP = dyn_cast(CP->getConstVal())) return CFP->getValueAPF().isPosZero(); } } else if (Op->getOpcode() == ISD::BITCAST && @@ -3792,7 +3792,7 @@ SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const { - if (ConstantSDNode *RHSC = dyn_cast(RHS.getNode())) { + if (auto *RHSC = dyn_cast(RHS.getNode())) { unsigned C = RHSC->getZExtValue(); if (!isLegalICmpImmediate(C)) { // Constant does not fit, try adjusting it by one? 
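Most of the ARMISelLowering changes sit in the condition-declaration idiom, where the casted pointer is scoped to the branch in which the cast succeeded. A generic sketch of that shape; standard dynamic_cast stands in here for llvm::dyn_cast, which shares the null-on-failure contract, and Base/Derived are placeholder names:

#include <iostream>

struct Base { virtual ~Base() = default; };
struct Derived : public Base { int Payload = 7; };

void visit(Base *B) {
  // The pointer exists only inside the branch where the cast is known to
  // have succeeded, the same shape as:
  //   if (auto *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { ... }
  if (auto *D = dynamic_cast<Derived *>(B))
    std::cout << D->Payload << '\n';
  else
    std::cout << "not a Derived\n";
}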
@@ -3976,9 +3976,9 @@ // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond) // if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) { - const ConstantSDNode *CMOVTrue = + const auto *CMOVTrue = dyn_cast(Cond.getOperand(0)); - const ConstantSDNode *CMOVFalse = + const auto *CMOVFalse = dyn_cast(Cond.getOperand(1)); if (CMOVTrue && CMOVFalse) { @@ -4334,7 +4334,7 @@ if (isFloatingPointZero(Op)) return DAG.getConstant(0, SDLoc(Op), MVT::i32); - if (LoadSDNode *Ld = dyn_cast(Op)) + if (auto *Ld = dyn_cast(Op)) return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(), Ld->getAlignment(), Ld->getMemOperand()->getFlags()); @@ -4352,7 +4352,7 @@ return; } - if (LoadSDNode *Ld = dyn_cast(Op)) { + if (auto *Ld = dyn_cast(Op)) { SDValue Ptr = Ld->getBasePtr(); RetVal1 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(), @@ -4485,7 +4485,7 @@ SDLoc dl(Op); EVT PTy = getPointerTy(DAG.getDataLayout()); - JumpTableSDNode *JT = cast(Table); + auto *JT = cast(Table); SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy); Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI); Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy)); @@ -4786,7 +4786,7 @@ // If the index is not constant, we will introduce an additional // multiply that will stick. // Give up in that case. - ConstantSDNode *Index = dyn_cast(Op.getOperand(1)); + auto *Index = dyn_cast(Op.getOperand(1)); if (!Index) return SDValue(); unsigned DstNumElt = DstVT.getVectorNumElements(); @@ -5573,7 +5573,7 @@ return SDValue(); bool IsDouble = Op.getValueType() == MVT::f64; - ConstantFPSDNode *CFP = cast(Op); + auto *CFP = cast(Op); // Use the default (constant pool) lowering for double constants when we have // an SP-only FPU @@ -6043,7 +6043,7 @@ // expansion code take care of it. SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST) const { - BuildVectorSDNode *BVN = cast(Op.getNode()); + auto *BVN = cast(Op.getNode()); SDLoc dl(Op); EVT VT = Op.getValueType(); @@ -6614,7 +6614,7 @@ SDValue V2 = Op.getOperand(1); SDLoc dl(Op); EVT VT = Op.getValueType(); - ShuffleVectorSDNode *SVN = cast(Op.getNode()); + auto *SVN = cast(Op.getNode()); // Convert shuffles that are directly supported on NEON to target-specific // DAG nodes, instead of keeping them as shuffles and matching them again @@ -6840,10 +6840,10 @@ return false; unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0; unsigned HiElt = 1 - LoElt; - ConstantSDNode *Lo0 = dyn_cast(BVN->getOperand(LoElt)); - ConstantSDNode *Hi0 = dyn_cast(BVN->getOperand(HiElt)); - ConstantSDNode *Lo1 = dyn_cast(BVN->getOperand(LoElt+2)); - ConstantSDNode *Hi1 = dyn_cast(BVN->getOperand(HiElt+2)); + auto *Lo0 = dyn_cast(BVN->getOperand(LoElt)); + auto *Hi0 = dyn_cast(BVN->getOperand(HiElt)); + auto *Lo1 = dyn_cast(BVN->getOperand(LoElt+2)); + auto *Hi1 = dyn_cast(BVN->getOperand(HiElt+2)); if (!Lo0 || !Hi0 || !Lo1 || !Hi1) return false; if (isSigned) { @@ -6862,7 +6862,7 @@ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { SDNode *Elt = N->getOperand(i).getNode(); - if (ConstantSDNode *C = dyn_cast(Elt)) { + if (auto *C = dyn_cast(Elt)) { unsigned EltSize = VT.getScalarSizeInBits(); unsigned HalfSize = EltSize / 2; if (isSigned) { @@ -6973,7 +6973,7 @@ N->getValueType(0), N->getOpcode()); - if (LoadSDNode *LD = dyn_cast(N)) + if (auto *LD = dyn_cast(N)) return SkipLoadExtensionForVMULL(LD, DAG); // Otherwise, the value must be a BUILD_VECTOR. 
For v2i64, it will @@ -6996,7 +6996,7 @@ SmallVector Ops; SDLoc dl(N); for (unsigned i = 0; i != NumElts; ++i) { - ConstantSDNode *C = cast(N->getOperand(i)); + auto *C = cast(N->getOperand(i)); const APInt &CInt = C->getAPIntValue(); // Element types smaller than 32 bits are not legal, so use i32 elements. // The values are implicitly truncated so sext vs. zext doesn't matter. @@ -9235,8 +9235,8 @@ return SDValue(); // Second is the constant, verify its correct. - ConstantSDNode *C0 = dyn_cast(ExtVec0->getOperand(1)); - ConstantSDNode *C1 = dyn_cast(ExtVec1->getOperand(1)); + auto *C0 = dyn_cast(ExtVec0->getOperand(1)); + auto *C1 = dyn_cast(ExtVec1->getOperand(1)); // For the constant, we want to see all the even or all the odd. if (!C0 || !C1 || C0->getZExtValue() != nextIndex @@ -9626,7 +9626,7 @@ if (VT != MVT::i32) return SDValue(); - ConstantSDNode *C = dyn_cast(N->getOperand(1)); + auto *C = dyn_cast(N->getOperand(1)); if (!C) return SDValue(); @@ -9698,7 +9698,7 @@ const ARMSubtarget *Subtarget) { // Attempt to use immediate-form VBIC - BuildVectorSDNode *BVN = dyn_cast(N->getOperand(1)); + auto *BVN = dyn_cast(N->getOperand(1)); SDLoc dl(N); EVT VT = N->getValueType(0); SelectionDAG &DAG = DCI.DAG; @@ -9740,7 +9740,7 @@ TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget) { // Attempt to use immediate-form VORR - BuildVectorSDNode *BVN = dyn_cast(N->getOperand(1)); + auto *BVN = dyn_cast(N->getOperand(1)); SDLoc dl(N); EVT VT = N->getValueType(0); SelectionDAG &DAG = DCI.DAG; @@ -9790,8 +9790,8 @@ bool HasAnyUndefs; APInt SplatBits0, SplatBits1; - BuildVectorSDNode *BVN0 = dyn_cast(N0->getOperand(1)); - BuildVectorSDNode *BVN1 = dyn_cast(N1->getOperand(1)); + auto *BVN0 = dyn_cast(N0->getOperand(1)); + auto *BVN1 = dyn_cast(N1->getOperand(1)); // Ensure that the second operand of both ands are constants if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize, HasAnyUndefs) && !HasAnyUndefs) { @@ -9842,7 +9842,7 @@ // actually a bitfield set. If the mask is 0xffff, we can do better // via a movt instruction, so don't use BFI in that case. SDValue MaskOp = N0.getOperand(1); - ConstantSDNode *MaskC = dyn_cast(MaskOp); + auto *MaskC = dyn_cast(MaskOp); if (!MaskC) return SDValue(); unsigned Mask = MaskC->getZExtValue(); @@ -9850,7 +9850,7 @@ return SDValue(); SDValue Res; // Case (1): or (and A, mask), val => ARMbfi A, val, mask - ConstantSDNode *N1C = dyn_cast(N1); + auto *N1C = dyn_cast(N1); if (N1C) { unsigned Val = N1C->getZExtValue(); if ((Val & ~Mask) != Val) @@ -9869,7 +9869,7 @@ } } else if (N1.getOpcode() == ISD::AND) { // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask - ConstantSDNode *N11C = dyn_cast(N1.getOperand(1)); + auto *N11C = dyn_cast(N1.getOperand(1)); if (!N11C) return SDValue(); unsigned Mask2 = N11C->getZExtValue(); @@ -10032,7 +10032,7 @@ if (N1.getOpcode() == ISD::AND) { // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff // the bits being cleared by the AND are not demanded by the BFI. - ConstantSDNode *N11C = dyn_cast(N1.getOperand(1)); + auto *N11C = dyn_cast(N1.getOperand(1)); if (!N11C) return SDValue(); unsigned InvMask = cast(N->getOperand(2))->getZExtValue(); @@ -10100,7 +10100,7 @@ InNode->getOperand(1).getOpcode() == ISD::FrameIndex && !cast(InNode)->isVolatile()) { // TODO: Should this be done for non-FrameIndex operands? 
- LoadSDNode *LD = cast(InNode); + auto *LD = cast(InNode); SelectionDAG &DAG = DCI.DAG; SDLoc DL(LD); @@ -10346,7 +10346,7 @@ SmallVector NewMask; unsigned NumElts = VT.getVectorNumElements(); unsigned HalfElts = NumElts/2; - ShuffleVectorSDNode *SVN = cast(N); + auto *SVN = cast(N); for (unsigned n = 0; n < NumElts; ++n) { int MaskElt = SVN->getMaskElt(n); int NewElt = -1; @@ -10373,7 +10373,7 @@ const bool isStore = N->getOpcode() == ISD::STORE; const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1); SDValue Addr = N->getOperand(AddrOpIdx); - MemSDNode *MemN = cast(N); + auto *MemN = cast(N); SDLoc dl(N); // Search for a use of the address operand that is an increment. @@ -10458,7 +10458,7 @@ // If the increment is a constant, it must match the memory ref size. SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0); - if (ConstantSDNode *CInc = dyn_cast(Inc.getNode())) { + if (auto *CInc = dyn_cast(Inc.getNode())) { uint64_t IncVal = CInc->getZExtValue(); if (IncVal != NumBytes) continue; @@ -10525,7 +10525,7 @@ Ops.push_back(N->getOperand(AddrOpIdx)); Ops.push_back(Inc); - if (StoreSDNode *StN = dyn_cast(N)) { + if (auto *StN = dyn_cast(N)) { // Try to match the intrinsic's signature Ops.push_back(StN->getValue()); } else { @@ -10632,7 +10632,7 @@ Tys[n] = MVT::Other; SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1)); SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) }; - MemIntrinsicSDNode *VLDMemInt = cast(VLD); + auto *VLDMemInt = cast(VLD); SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, Ops, VLDMemInt->getMemoryVT(), VLDMemInt->getMemOperand()); @@ -10707,7 +10707,7 @@ /// ISD::STORE. static SDValue PerformSTORECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { - StoreSDNode *St = cast(N); + auto *St = cast(N); if (St->isVolatile()) return SDValue(); @@ -10887,7 +10887,7 @@ } BitVector UndefElements; - BuildVectorSDNode *BV = cast(ConstVec); + auto *BV = cast(ConstVec); int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33); if (C == -1 || C == 0 || C > 32) return SDValue(); @@ -10945,7 +10945,7 @@ } BitVector UndefElements; - BuildVectorSDNode *BV = cast(ConstVec); + auto *BV = cast(ConstVec); int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33); if (C == -1 || C == 0 || C > 32) return SDValue(); @@ -10973,7 +10973,7 @@ // Ignore bit_converts. while (Op.getOpcode() == ISD::BITCAST) Op = Op.getOperand(0); - BuildVectorSDNode *BVN = dyn_cast(Op.getNode()); + auto *BVN = dyn_cast(Op.getNode()); APInt SplatBits, SplatUndef; unsigned SplatBitSize; bool HasAnyUndefs; @@ -11173,7 +11173,7 @@ // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16. SDValue N1 = N->getOperand(1); - if (ConstantSDNode *C = dyn_cast(N1)) { + if (auto *C = dyn_cast(N1)) { SDValue N0 = N->getOperand(0); if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP && DAG.MaskedValueIsZero(N0.getOperand(0), @@ -11263,7 +11263,7 @@ // The operand to BFI is already a mask suitable for removing the bits it // sets. 
-    ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
+    auto *CI = cast<ConstantSDNode>(Op.getOperand(2));
     const APInt &Mask = CI->getAPIntValue();
     KnownZero &= Mask;
     KnownOne &= Mask;
@@ -11310,7 +11310,7 @@
   SDValue And = CmpZ->getOperand(0);
   if (And->getOpcode() != ISD::AND)
     return SDValue();
-  ConstantSDNode *AndC = dyn_cast<ConstantSDNode>(And->getOperand(1));
+  auto *AndC = dyn_cast<ConstantSDNode>(And->getOperand(1));
   if (!AndC || !AndC->getAPIntValue().isPowerOf2())
     return SDValue();
   SDValue X = And->getOperand(0);
@@ -11326,7 +11326,7 @@
   if (Op1->getOpcode() != ISD::OR)
     return SDValue();
-  ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1));
+  auto *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1));
   if (!OrC)
     return SDValue();
   SDValue Y = Op1->getOperand(0);
@@ -11944,7 +11944,7 @@
   if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
     // AddressingMode 3
     Base = Ptr->getOperand(0);
-    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
+    if (auto *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
       int RHSC = (int)RHS->getZExtValue();
       if (RHSC < 0 && RHSC > -256) {
         assert(Ptr->getOpcode() == ISD::ADD);
@@ -11958,7 +11958,7 @@
     return true;
   } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
     // AddressingMode 2
-    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
+    if (auto *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
       int RHSC = (int)RHS->getZExtValue();
       if (RHSC < 0 && RHSC > -0x1000) {
         assert(Ptr->getOpcode() == ISD::ADD);
@@ -12001,7 +12001,7 @@
     return false;
   Base = Ptr->getOperand(0);
-  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
+  if (auto *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
      assert(Ptr->getOpcode() == ISD::ADD);
@@ -12032,11 +12032,11 @@
   EVT VT;
   SDValue Ptr;
   bool isSEXTLoad = false;
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
+  if (auto *LD = dyn_cast<LoadSDNode>(N)) {
     Ptr = LD->getBasePtr();
     VT = LD->getMemoryVT();
     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
-  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+  } else if (auto *ST = dyn_cast<StoreSDNode>(N)) {
     Ptr = ST->getBasePtr();
     VT = ST->getMemoryVT();
   } else
@@ -12068,12 +12068,12 @@
   EVT VT;
   SDValue Ptr;
   bool isSEXTLoad = false, isNonExt;
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
+  if (auto *LD = dyn_cast<LoadSDNode>(N)) {
     VT = LD->getMemoryVT();
     Ptr = LD->getBasePtr();
     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
     isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
-  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
+  } else if (auto *ST = dyn_cast<StoreSDNode>(N)) {
     VT = ST->getMemoryVT();
     Ptr = ST->getBasePtr();
     isNonExt = !ST->isTruncatingStore();
@@ -12153,7 +12153,7 @@
     return;
   }
   case ISD::INTRINSIC_W_CHAIN: {
-    ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
+    auto *CN = cast<ConstantSDNode>(Op->getOperand(1));
     Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
     switch (IntID) {
     default: return;
@@ -12178,7 +12178,7 @@
   if (!Subtarget->hasV6Ops())
     return false;
-  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
+  auto *IA = cast<InlineAsm>(CI->getCalledValue());
   std::string AsmStr = IA->getAsmString();
   SmallVector AsmPieces;
   SplitString(AsmStr, AsmPieces, ";\n");
@@ -12194,7 +12194,7 @@
   if (AsmPieces.size() == 3 &&
       AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
       IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
-    IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
+    auto *Ty = dyn_cast<IntegerType>(CI->getType());
    if (Ty && Ty->getBitWidth() == 32)
      return IntrinsicLowering::LowerToByteSwap(CI);
  }
@@ -12352,7 +12352,7 @@
   case 'j': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O':
-    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
+    auto *C = dyn_cast<ConstantSDNode>(Op);
     if (!C)
       return;
@@ -12781,7 +12781,7 @@
   case Intrinsic::arm_ldaex:
   case Intrinsic::arm_ldrex: {
     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
-    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
+    auto *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
     Info.opc = ISD::INTRINSIC_W_CHAIN;
     Info.memVT = MVT::getVT(PtrTy->getElementType());
     Info.ptrVal = I.getArgOperand(0);
@@ -12795,7 +12795,7 @@
   case Intrinsic::arm_stlex:
   case Intrinsic::arm_strex: {
     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
-    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
+    auto *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
     Info.opc = ISD::INTRINSIC_W_CHAIN;
     Info.memVT = MVT::getVT(PtrTy->getElementType());
     Info.ptrVal = I.getArgOperand(1);
Index: lib/Target/ARM/ARMInstrInfo.cpp
===================================================================
--- lib/Target/ARM/ARMInstrInfo.cpp
+++ lib/Target/ARM/ARMInstrInfo.cpp
@@ -108,7 +108,7 @@
     return;
   }
-  const GlobalValue *GV =
+  const auto *GV =
       cast<GlobalValue>((*MI->memoperands_begin())->getValue());
   if (!Subtarget.isGVIndirectSymbol(GV)) {
Index: lib/Target/ARM/ARMSelectionDAGInfo.cpp
===================================================================
--- lib/Target/ARM/ARMSelectionDAGInfo.cpp
+++ lib/Target/ARM/ARMSelectionDAGInfo.cpp
@@ -51,7 +51,7 @@
     break;
   case RTLIB::MEMSET:
     AEABILibcall = AEABI_MEMSET;
-    if (ConstantSDNode *ConstantSrc = dyn_cast<ConstantSDNode>(Src))
+    if (auto *ConstantSrc = dyn_cast<ConstantSDNode>(Src))
       if (ConstantSrc->getZExtValue() == 0)
         AEABILibcall = AEABI_MEMCLR;
     break;
@@ -137,7 +137,7 @@
     return SDValue();
   // This requires the copy size to be a constant, preferably
   // within a subtarget-specific limit.
-  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
+  auto *ConstantSize = dyn_cast<ConstantSDNode>(Size);
   if (!ConstantSize)
     return EmitSpecializedLibcall(DAG, dl, Chain, Dst, Src, Size, Align,
                                   RTLIB::MEMCPY);
Index: lib/Target/ARM/AsmParser/ARMAsmParser.cpp
===================================================================
--- lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -844,7 +844,7 @@
   bool isARMBranchTarget() const {
     if (!isImm()) return false;
-    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
+    if (const auto *CE = dyn_cast<MCConstantExpr>(getImm()))
       return CE->getValue() % 4 == 0;
     return true;
   }
@@ -853,7 +853,7 @@
   bool isThumbBranchTarget() const {
     if (!isImm()) return false;
-    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
+    if (const auto *CE = dyn_cast<MCConstantExpr>(getImm()))
       return CE->getValue() % 2 == 0;
     return true;
   }
@@ -864,7 +864,7 @@
   bool isUnsignedOffset() const {
     if (!isImm()) return false;
     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
-    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
+    if (const auto *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
       int64_t Val = CE->getValue();
       int64_t Align = 1LL << scale;
       int64_t Max = Align * ((1LL << width) - 1);
@@ -878,7 +878,7 @@
   bool isSignedOffset() const {
     if (!isImm()) return false;
     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
-    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
+    if (const auto *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
       int64_t Val = CE->getValue();
       int64_t Align = 1LL << scale;
       int64_t Max = Align * ((1LL << (width-1)) - 1);
@@ -896,7 +896,7 @@
     int64_t Val = 0;
     if (isImm()) {
       if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
-      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
+      const auto *CE = dyn_cast<MCConstantExpr>(Imm.Val);
       if (!CE) return false;
       Val = CE->getValue();
     }
@@ -910,49 +910,49 @@
   }
   bool isFPImm() const {
     if (!isImm()) return false;
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+    const auto *CE =
dyn_cast(getImm()); if (!CE) return false; int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue())); return Val != -1; } bool isFBits16() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value >= 0 && Value <= 16; } bool isFBits32() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value >= 1 && Value <= 32; } bool isImm8s4() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; } bool isImm0_1020s4() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return ((Value & 3) == 0) && Value >= 0 && Value <= 1020; } bool isImm0_508s4() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return ((Value & 3) == 0) && Value >= 0 && Value <= 508; } bool isImm0_508s4Neg() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = -CE->getValue(); // explicitly exclude zero. we want that to use the normal 0_508 version. @@ -960,175 +960,175 @@ } bool isImm0_239() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value >= 0 && Value < 240; } bool isImm0_255() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value >= 0 && Value < 256; } bool isImm0_4095() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value >= 0 && Value < 4096; } bool isImm0_4095Neg() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = -CE->getValue(); return Value > 0 && Value < 4096; } bool isImm0_1() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value >= 0 && Value < 2; } bool isImm0_3() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value >= 0 && Value < 4; } bool isImm0_7() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value >= 0 && Value < 8; } bool isImm0_15() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value >= 0 && Value < 16; } bool isImm0_31() const { if 
(!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value >= 0 && Value < 32; } bool isImm0_63() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value >= 0 && Value < 64; } bool isImm8() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value == 8; } bool isImm16() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value == 16; } bool isImm32() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value == 32; } bool isShrImm8() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value > 0 && Value <= 8; } bool isShrImm16() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value > 0 && Value <= 16; } bool isShrImm32() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value > 0 && Value <= 32; } bool isShrImm64() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value > 0 && Value <= 64; } bool isImm1_7() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value > 0 && Value < 8; } bool isImm1_15() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value > 0 && Value < 16; } bool isImm1_31() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value > 0 && Value < 32; } bool isImm1_16() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value > 0 && Value < 17; } bool isImm1_32() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value > 0 && Value < 33; } bool isImm0_32() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value >= 0 && Value < 33; } bool isImm0_65535() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; 
int64_t Value = CE->getValue(); return Value >= 0 && Value < 65536; } bool isImm256_65535Expr() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); // If it's not a constant expression, it'll generate a fixup and be // handled later. if (!CE) return true; @@ -1137,7 +1137,7 @@ } bool isImm0_65535Expr() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); // If it's not a constant expression, it'll generate a fixup and be // handled later. if (!CE) return true; @@ -1146,28 +1146,28 @@ } bool isImm24bit() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value >= 0 && Value <= 0xffffff; } bool isImmThumbSR() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value > 0 && Value < 33; } bool isPKHLSLImm() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value >= 0 && Value < 32; } bool isPKHASRImm() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value > 0 && Value <= 32; @@ -1180,7 +1180,7 @@ // If it is a constant, it must fit into a modified immediate encoding. if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return (ARM_AM::getSOImmVal(Value) != -1 || @@ -1188,14 +1188,14 @@ } bool isT2SOImm() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return ARM_AM::getT2SOImmVal(Value) != -1; } bool isT2SOImmNot() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return ARM_AM::getT2SOImmVal(Value) == -1 && @@ -1203,7 +1203,7 @@ } bool isT2SOImmNeg() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); // Only use this when not representable as a plain so_imm. 
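
The predicates above all follow one shape: bail out unless the operand is an immediate, try to fold it to an MCConstantExpr, then range-check the value. The hunks only change the declaration to `const auto *`, which the LLVM Coding Standards permit when the pointee type is already spelled once in the initializer's `cast<>`/`dyn_cast<>` template argument. Below is a minimal standalone sketch of that idiom, not part of the patch: the Expr/ConstantExpr hierarchy and the dyn_cast shim are stand-ins for the MC classes, so the example compiles without LLVM.

#include <cstdint>
#include <iostream>

struct Expr {
  virtual ~Expr() = default; // polymorphic base so the downcast can be checked
};

struct ConstantExpr : Expr {
  int64_t Value;
  explicit ConstantExpr(int64_t V) : Value(V) {}
  int64_t getValue() const { return Value; }
};

// Stand-in for llvm::dyn_cast<>: yields nullptr when the downcast fails.
template <typename To, typename From> const To *dyn_cast(const From *V) {
  return dynamic_cast<const To *>(V);
}

// Mirrors the isImm0_255()-style predicates in the diff.
bool isImm0_255(const Expr *E) {
  // Before: const ConstantExpr *CE = dyn_cast<ConstantExpr>(E);
  // After: the type is named exactly once, in the template argument.
  const auto *CE = dyn_cast<ConstantExpr>(E);
  if (!CE) return false;
  int64_t Value = CE->getValue();
  return Value >= 0 && Value < 256;
}

int main() {
  ConstantExpr InRange(200);
  Expr NotAConstant;
  std::cout << isImm0_255(&InRange) << ' '       // prints 1
            << isImm0_255(&NotAConstant) << '\n'; // prints 0
}

The payoff is mechanical: if the pointee type ever changes, only the template argument needs editing, and the left-hand side can no longer silently disagree with it.
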
@@ -1212,7 +1212,7 @@ } bool isSetEndImm() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return Value == 1 || Value == 0; @@ -1232,14 +1232,14 @@ bool isModImm() const { return Kind == k_ModifiedImmediate; } bool isModImmNot() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return ARM_AM::getSOImmVal(~Value) != -1; } bool isModImmNeg() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Value = CE->getValue(); return ARM_AM::getSOImmVal(Value) == -1 && @@ -1343,7 +1343,7 @@ bool isAM2OffsetImm() const { if (!isImm()) return false; // Immediate offset in range [-4095, 4095]. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Val = CE->getValue(); return (Val == INT32_MIN) || (Val > -4096 && Val < 4096); @@ -1372,7 +1372,7 @@ if (Kind == k_PostIndexRegister) return PostIdxReg.ShiftTy == ARM_AM::no_shift; // Immediate offset in range [-255, 255]. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Val = CE->getValue(); // Special case, #-0 is INT32_MIN. @@ -1561,14 +1561,14 @@ } bool isPostIdxImm8() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Val = CE->getValue(); return (Val > -256 && Val < 256) || (Val == INT32_MIN); } bool isPostIdxImm8s4() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (!CE) return false; int64_t Val = CE->getValue(); return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || @@ -1777,7 +1777,7 @@ bool isNEONi8splat() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); // Must be a constant. if (!CE) return false; int64_t Value = CE->getValue(); @@ -1791,7 +1791,7 @@ return false; // Leave that for bytes replication and forbid by default. if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); // Must be a constant. if (!CE) return false; unsigned Value = CE->getValue(); @@ -1801,7 +1801,7 @@ bool isNEONi16splatNot() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); // Must be a constant. if (!CE) return false; unsigned Value = CE->getValue(); @@ -1813,7 +1813,7 @@ return false; // Leave that for bytes replication and forbid by default. if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); // Must be a constant. if (!CE) return false; unsigned Value = CE->getValue(); @@ -1823,7 +1823,7 @@ bool isNEONi32splatNot() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); // Must be a constant. if (!CE) return false; unsigned Value = CE->getValue(); @@ -1833,7 +1833,7 @@ bool isNEONByteReplicate(unsigned NumBytes) const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); // Must be a constant. 
if (!CE) return false; @@ -1856,7 +1856,7 @@ return false; // Let it to be classified as byte-replicate case. if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); // Must be a constant. if (!CE) return false; @@ -1873,7 +1873,7 @@ } bool isNEONi32vmovNeg() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); // Must be a constant. if (!CE) return false; int64_t Value = ~CE->getValue(); @@ -1890,7 +1890,7 @@ bool isNEONi64splat() const { if (!isImm()) return false; - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); // Must be a constant. if (!CE) return false; uint64_t Value = CE->getValue(); @@ -1904,7 +1904,7 @@ // Add as immediates when possible. Null MCExpr = 0. if (!Expr) Inst.addOperand(MCOperand::createImm(0)); - else if (const MCConstantExpr *CE = dyn_cast(Expr)) + else if (const auto *CE = dyn_cast(Expr)) Inst.addOperand(MCOperand::createImm(CE->getValue())); else Inst.addOperand(MCOperand::createExpr(Expr)); @@ -2023,14 +2023,14 @@ void addModImmNotOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue()); Inst.addOperand(MCOperand::createImm(Enc)); } void addModImmNegOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue()); Inst.addOperand(MCOperand::createImm(Enc)); } @@ -2053,19 +2053,19 @@ void addFBits16Operands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); Inst.addOperand(MCOperand::createImm(16 - CE->getValue())); } void addFBits32Operands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); Inst.addOperand(MCOperand::createImm(32 - CE->getValue())); } void addFPImmOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue())); Inst.addOperand(MCOperand::createImm(Val)); } @@ -2074,7 +2074,7 @@ assert(N == 1 && "Invalid number of operands!"); // FIXME: We really want to scale the value here, but the LDRD/STRD // instruction don't encode operands that way yet. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); Inst.addOperand(MCOperand::createImm(CE->getValue())); } @@ -2082,7 +2082,7 @@ assert(N == 1 && "Invalid number of operands!"); // The immediate is scaled by four in the encoding and is stored // in the MCInst as such. Lop off the low two bits here. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); Inst.addOperand(MCOperand::createImm(CE->getValue() / 4)); } @@ -2090,7 +2090,7 @@ assert(N == 1 && "Invalid number of operands!"); // The immediate is scaled by four in the encoding and is stored // in the MCInst as such. Lop off the low two bits here. 
- const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4))); } @@ -2098,7 +2098,7 @@ assert(N == 1 && "Invalid number of operands!"); // The immediate is scaled by four in the encoding and is stored // in the MCInst as such. Lop off the low two bits here. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); Inst.addOperand(MCOperand::createImm(CE->getValue() / 4)); } @@ -2106,7 +2106,7 @@ assert(N == 1 && "Invalid number of operands!"); // The constant encodes as the immediate-1, and we store in the instruction // the bits as encoded, so subtract off one here. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); Inst.addOperand(MCOperand::createImm(CE->getValue() - 1)); } @@ -2114,7 +2114,7 @@ assert(N == 1 && "Invalid number of operands!"); // The constant encodes as the immediate-1, and we store in the instruction // the bits as encoded, so subtract off one here. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); Inst.addOperand(MCOperand::createImm(CE->getValue() - 1)); } @@ -2122,7 +2122,7 @@ assert(N == 1 && "Invalid number of operands!"); // The constant encodes as the immediate, except for 32, which encodes as // zero. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); unsigned Imm = CE->getValue(); Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm))); } @@ -2131,7 +2131,7 @@ assert(N == 1 && "Invalid number of operands!"); // An ASR value of 32 encodes as 0, so that's how we want to add it to // the instruction as well. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); int Val = CE->getValue(); Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val)); } @@ -2140,7 +2140,7 @@ assert(N == 1 && "Invalid number of operands!"); // The operand is actually a t2_so_imm, but we have its bitwise // negation in the assembly source, so twiddle it here. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); Inst.addOperand(MCOperand::createImm(~CE->getValue())); } @@ -2148,7 +2148,7 @@ assert(N == 1 && "Invalid number of operands!"); // The operand is actually a t2_so_imm, but we have its // negation in the assembly source, so twiddle it here. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); Inst.addOperand(MCOperand::createImm(-CE->getValue())); } @@ -2156,17 +2156,17 @@ assert(N == 1 && "Invalid number of operands!"); // The operand is actually an imm0_4095, but we have its // negation in the assembly source, so twiddle it here. 
- const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); Inst.addOperand(MCOperand::createImm(-CE->getValue())); } void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const { - if(const MCConstantExpr *CE = dyn_cast(getImm())) { + if(const auto *CE = dyn_cast(getImm())) { Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2)); return; } - const MCSymbolRefExpr *SR = dyn_cast(Imm.Val); + const auto *SR = dyn_cast(Imm.Val); assert(SR && "Unknown value type!"); Inst.addOperand(MCOperand::createExpr(SR)); } @@ -2174,13 +2174,13 @@ void addThumbMemPCOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); if (isImm()) { - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); if (CE) { Inst.addOperand(MCOperand::createImm(CE->getValue())); return; } - const MCSymbolRefExpr *SR = dyn_cast(Imm.Val); + const auto *SR = dyn_cast(Imm.Val); assert(SR && "Unknown value type!"); Inst.addOperand(MCOperand::createExpr(SR)); @@ -2224,7 +2224,7 @@ return; } - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); int Val = CE->getValue(); Inst.addOperand(MCOperand::createImm(Val)); } @@ -2301,7 +2301,7 @@ void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const { assert(N == 2 && "Invalid number of operands!"); - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); assert(CE && "non-constant AM2OffsetImm operand!"); int32_t Val = CE->getValue(); ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; @@ -2550,7 +2550,7 @@ void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); assert(CE && "non-constant post-idx-imm8 operand!"); int Imm = CE->getValue(); bool isAdd = Imm >= 0; @@ -2561,7 +2561,7 @@ void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); assert(CE && "non-constant post-idx-imm8s4 operand!"); int Imm = CE->getValue(); bool isAdd = Imm >= 0; @@ -2633,14 +2633,14 @@ assert(N == 1 && "Invalid number of operands!"); // The immediate encodes the type of constant as well as the value. // Mask in that this is an i8 splat. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00)); } void addNEONi16splatOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); // The immediate encodes the type of constant as well as the value. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); unsigned Value = CE->getValue(); Value = ARM_AM::encodeNEONi16splat(Value); Inst.addOperand(MCOperand::createImm(Value)); @@ -2649,7 +2649,7 @@ void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); // The immediate encodes the type of constant as well as the value. 
- const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); unsigned Value = CE->getValue(); Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff); Inst.addOperand(MCOperand::createImm(Value)); @@ -2658,7 +2658,7 @@ void addNEONi32splatOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); // The immediate encodes the type of constant as well as the value. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); unsigned Value = CE->getValue(); Value = ARM_AM::encodeNEONi32splat(Value); Inst.addOperand(MCOperand::createImm(Value)); @@ -2667,7 +2667,7 @@ void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); // The immediate encodes the type of constant as well as the value. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); unsigned Value = CE->getValue(); Value = ARM_AM::encodeNEONi32splat(~Value); Inst.addOperand(MCOperand::createImm(Value)); @@ -2676,7 +2676,7 @@ void addNEONinvByteReplicateOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); // The immediate encodes the type of constant as well as the value. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); unsigned Value = CE->getValue(); assert((Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && @@ -2689,7 +2689,7 @@ void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); // The immediate encodes the type of constant as well as the value. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); unsigned Value = CE->getValue(); if (Value >= 256 && Value <= 0xffff) Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200); @@ -2703,7 +2703,7 @@ void addNEONvmovByteReplicateOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); // The immediate encodes the type of constant as well as the value. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); unsigned Value = CE->getValue(); assert((Inst.getOpcode() == ARM::VMOVv8i8 || Inst.getOpcode() == ARM::VMOVv16i8) && @@ -2716,7 +2716,7 @@ void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); // The immediate encodes the type of constant as well as the value. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); unsigned Value = ~CE->getValue(); if (Value >= 256 && Value <= 0xffff) Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200); @@ -2730,7 +2730,7 @@ void addNEONi64splatOperands(MCInst &Inst, unsigned N) const { assert(N == 1 && "Invalid number of operands!"); // The immediate encodes the type of constant as well as the value. - const MCConstantExpr *CE = dyn_cast(getImm()); + const auto *CE = dyn_cast(getImm()); uint64_t Value = CE->getValue(); unsigned Imm = 0; for (unsigned i = 0; i < 8; ++i, Value >>= 8) { @@ -3302,7 +3302,7 @@ return -1; } // The expression must be evaluatable as an immediate. 
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
+    const auto *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
     if (!CE) {
       Error(ImmLoc, "invalid immediate shift value");
       return -1;
     }
@@ -3382,7 +3382,7 @@
   const MCExpr *ImmVal;
   if (getParser().parseExpression(ImmVal))
     return true;
-  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+  const auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
   if (!MCE)
     return TokError("immediate value expected for vector index");
@@ -3546,7 +3546,7 @@
       Error(Loc, "illegal expression");
       return MatchOperand_ParseFail;
     }
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
+    const auto *CE = dyn_cast<MCConstantExpr>(Expr);
     if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
       Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
       return MatchOperand_ParseFail;
@@ -3765,7 +3765,7 @@
       Error(Loc, "illegal expression");
       return MatchOperand_ParseFail;
     }
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
+    const auto *CE = dyn_cast<MCConstantExpr>(LaneIndex);
     if (!CE) {
       Error(Loc, "lane index must be empty or an integer");
       return MatchOperand_ParseFail;
@@ -4095,7 +4095,7 @@
     return MatchOperand_ParseFail;
   }
-  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
+  const auto *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
   if (!CE) {
     Error(Loc, "constant expression expected");
     return MatchOperand_ParseFail;
@@ -4145,7 +4145,7 @@
     return MatchOperand_ParseFail;
   }
-  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
+  const auto *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
   if (!CE) {
     Error(Loc, "constant expression expected");
     return MatchOperand_ParseFail;
@@ -4444,7 +4444,7 @@
       Error(Loc, "illegal expression");
       return MatchOperand_ParseFail;
     }
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
+    const auto *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
     if (!CE) {
       Error(Loc, "constant expression expected");
       return MatchOperand_ParseFail;
@@ -4526,7 +4526,7 @@
       Error(ExLoc, "malformed shift expression");
       return MatchOperand_ParseFail;
     }
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
+    const auto *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
     if (!CE) {
       Error(ExLoc, "shift amount must be an immediate");
       return MatchOperand_ParseFail;
@@ -4588,7 +4588,7 @@
       Error(ExLoc, "malformed rotate expression");
       return MatchOperand_ParseFail;
     }
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
+    const auto *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
     if (!CE) {
       Error(ExLoc, "rotate amount must be an immediate");
       return MatchOperand_ParseFail;
@@ -4648,7 +4648,7 @@
     return MatchOperand_ParseFail;
   }
-  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
+  const auto *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
   if (CE) {
     // Immediate must fit within 32-bits
@@ -4743,7 +4743,7 @@
       Error(E, "malformed immediate expression");
       return MatchOperand_ParseFail;
     }
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
+    const auto *CE = dyn_cast<MCConstantExpr>(LSBExpr);
     if (!CE) {
       Error(E, "'lsb' operand must be an immediate");
       return MatchOperand_ParseFail;
@@ -4872,7 +4872,7 @@
   SMLoc E;
   if (getParser().parseExpression(Offset, E))
     return MatchOperand_ParseFail;
-  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
+  const auto *CE = dyn_cast<MCConstantExpr>(Offset);
   if (!CE) {
     Error(S, "constant expression expected");
     return MatchOperand_ParseFail;
@@ -5050,7 +5050,7 @@
   // The expression has to be a constant. Memory references with relocations
   // don't come through here, as they use the