Index: docs/ProgrammersManual.rst
===================================================================
--- docs/ProgrammersManual.rst
+++ docs/ProgrammersManual.rst
@@ -117,6 +117,20 @@
 an instance of the specified class.  This can be very useful for constraint
 checking of various sorts (example below).
 
+``isoneof<>``:
+  It's a facility function similar to ``isa<>`` but it can check for multiple
+  types in a single call:
+
+  .. code-block:: c++
+
+    if (isoneof<Type1, Type2, Type3>(V))
+      return true;
+
+    // is equivalent to
+
+    if (isa<Type1>(V) || isa<Type2>(V) || isa<Type3>(V))
+      return true;
+
 ``cast<>``:
   The ``cast<>`` operator is a "checked cast" operation.  It converts a pointer
   or reference from a base class to a derived class, causing an assertion
Index: include/llvm/Analysis/ObjCARCAnalysisUtils.h
===================================================================
--- include/llvm/Analysis/ObjCARCAnalysisUtils.h
+++ include/llvm/Analysis/ObjCARCAnalysisUtils.h
@@ -127,7 +127,7 @@
 }
 
 inline bool IsNullOrUndef(const Value *V) {
-  return isa<ConstantPointerNull>(V) || isa<UndefValue>(V);
+  return isoneof<ConstantPointerNull, UndefValue>(V);
 }
 
 inline bool IsNoopInstruction(const Instruction *I) {
@@ -140,7 +140,7 @@
 inline bool IsPotentialRetainableObjPtr(const Value *Op) {
   // Pointers to static or stack storage are not valid retainable object
   // pointers.
-  if (isa<Constant>(Op) || isa<AllocaInst>(Op))
+  if (isoneof<Constant, AllocaInst>(Op))
     return false;
   // Special arguments can not be a valid retainable object pointer.
   if (const Argument *Arg = dyn_cast<Argument>(Op))
@@ -201,9 +201,7 @@
   // Assume that call results and arguments have their own "provenance".
   // Constants (including GlobalVariables) and Allocas are never
   // reference-counted.
- if (isa(V) || isa(V) || - isa(V) || isa(V) || - isa(V)) + if (isoneof(V)) return true; if (const LoadInst *LI = dyn_cast(V)) { Index: include/llvm/Analysis/TargetTransformInfoImpl.h =================================================================== --- include/llvm/Analysis/TargetTransformInfoImpl.h +++ include/llvm/Analysis/TargetTransformInfoImpl.h @@ -470,7 +470,7 @@ // Obtain the minimum required size to hold the value (without the sign) // In case of a vector it returns the min required size for one element. unsigned minRequiredElementSize(const Value* Val, bool &isSigned) { - if (isa(Val) || isa(Val)) { + if (isoneof(Val)) { const auto* VectorValue = cast(Val); // In case of a vector need to pick the max between the min Index: include/llvm/CodeGen/BasicTTIImpl.h =================================================================== --- include/llvm/CodeGen/BasicTTIImpl.h +++ include/llvm/CodeGen/BasicTTIImpl.h @@ -310,8 +310,7 @@ BasicBlock *BB = *I; for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J) - if (isa(J) || isa(J)) { - ImmutableCallSite CS(&*J); + if (ImmutableCallSite CS{&*J}) { if (const Function *F = CS.getCalledFunction()) { if (!static_cast(this)->isLoweredToCall(F)) continue; Index: include/llvm/IR/CallSite.h =================================================================== --- include/llvm/IR/CallSite.h +++ include/llvm/IR/CallSite.h @@ -110,7 +110,7 @@ Value *V = getCalledValue(); if (!V) return false; - if (isa(V) || isa(V)) + if (isoneof(V)) return false; if (CallInst *CI = dyn_cast(getInstruction())) { if (CI->isInlineAsm()) Index: include/llvm/IR/Operator.h =================================================================== --- include/llvm/IR/Operator.h +++ include/llvm/IR/Operator.h @@ -63,7 +63,7 @@ static inline bool classof(const Instruction *) { return true; } static inline bool classof(const ConstantExpr *) { return true; } static inline bool classof(const Value *V) { - return isa(V) || isa(V); + return 
isoneof<Instruction, ConstantExpr>(V);
   }
 };
Index: include/llvm/IR/PatternMatch.h
===================================================================
--- include/llvm/IR/PatternMatch.h
+++ include/llvm/IR/PatternMatch.h
@@ -876,7 +876,7 @@
 
 private:
   bool matchIfNot(Value *LHS, Value *RHS) {
-    return (isa<ConstantInt>(RHS) || isa<ConstantDataVector>(RHS) ||
+    return (isoneof<ConstantInt, ConstantDataVector>(RHS) ||
             // FIXME: Remove CV.
             isa<ConstantVector>(RHS)) &&
            cast<Constant>(RHS)->isAllOnesValue() && L.match(LHS);
Index: include/llvm/IR/Statepoint.h
===================================================================
--- include/llvm/IR/Statepoint.h
+++ include/llvm/IR/Statepoint.h
@@ -336,7 +336,7 @@
 
   bool isTiedToInvoke() const {
     const Value *Token = getArgOperand(0);
-    return isa<LandingPadInst>(Token) || isa<ExtractValueInst>(Token);
+    return isoneof<LandingPadInst, ExtractValueInst>(Token);
   }
 
   /// The statepoint with which this gc.relocate is associated.
Index: include/llvm/IR/User.h
===================================================================
--- include/llvm/IR/User.h
+++ include/llvm/IR/User.h
@@ -289,7 +289,7 @@
 
   // Methods for support type inquiry through isa, cast, and dyn_cast:
   static inline bool classof(const Value *V) {
-    return isa<Instruction>(V) || isa<Constant>(V);
+    return isoneof<Instruction, Constant>(V);
   }
 };
Index: include/llvm/IR/Value.h
===================================================================
--- include/llvm/IR/Value.h
+++ include/llvm/IR/Value.h
@@ -802,19 +802,19 @@
 
 template <> struct isa_impl<GlobalValue, Value> {
   static inline bool doit(const Value &Val) {
-    return isa<GlobalObject>(Val) || isa<GlobalIndirectSymbol>(Val);
+    return isoneof<GlobalObject, GlobalIndirectSymbol>(Val);
   }
 };
 
 template <> struct isa_impl<GlobalIndirectSymbol, Value> {
   static inline bool doit(const Value &Val) {
-    return isa<GlobalAlias>(Val) || isa<GlobalIFunc>(Val);
+    return isoneof<GlobalAlias, GlobalIFunc>(Val);
  }
 };
 
 template <> struct isa_impl<GlobalObject, Value> {
   static inline bool doit(const Value &Val) {
-    return isa<GlobalVariable>(Val) || isa<Function>(Val);
+    return isoneof<GlobalVariable, Function>(Val);
   }
 };
Index: include/llvm/Support/Casting.h
===================================================================
--- include/llvm/Support/Casting.h
+++ include/llvm/Support/Casting.h
@@ -142,6 +142,23 @@
                             typename simplify_type<const Y>::SimpleType>::doit(Val);
 }
 
+// isoneof - Return true if the parameter to the
template is an instance
+// of one of the template type arguments. Used like this:
+//
+//  if (isoneof<Type1, Type2, Type3>(myVal)) { ... }
+//
+// which is equivalent to
+//
+//  if (isa<Type1>(myVal) || isa<Type2>(myVal) || isa<Type3>(myVal)) { ... }
+//
+template <typename Y> LLVM_NODISCARD inline bool isoneof(const Y &Val) {
+  return false;
+}
+template <typename First, typename... Rest, typename Y>
+LLVM_NODISCARD inline bool isoneof(const Y &Val) {
+  return isa<First>(Val) || isoneof<Rest...>(Val);
+}
+
 //===----------------------------------------------------------------------===//
 //                          cast<x> Support Templates
 //===----------------------------------------------------------------------===//
Index: lib/Analysis/AliasAnalysis.cpp
===================================================================
--- lib/Analysis/AliasAnalysis.cpp
+++ lib/Analysis/AliasAnalysis.cpp
@@ -463,8 +463,7 @@
   const Value *Object =
       GetUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout());
-  if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
-      isa<Constant>(Object))
+  if (!isIdentifiedObject(Object) || isoneof<GlobalValue, Constant>(Object))
     return MRI_ModRef;
 
   ImmutableCallSite CS(I);
Index: lib/Analysis/AssumptionCache.cpp
===================================================================
--- lib/Analysis/AssumptionCache.cpp
+++ lib/Analysis/AssumptionCache.cpp
@@ -58,7 +58,7 @@
     if (match(I, m_BitCast(m_Value(Op))) ||
         match(I, m_PtrToInt(m_Value(Op))) ||
         match(I, m_Not(m_Value(Op)))) {
-      if (isa<Instruction>(Op) || isa<Argument>(Op))
+      if (isoneof<Instruction, Argument>(Op))
         Affected.push_back(Op);
     }
   }
Index: lib/Analysis/BasicAliasAnalysis.cpp
===================================================================
--- lib/Analysis/BasicAliasAnalysis.cpp
+++ lib/Analysis/BasicAliasAnalysis.cpp
@@ -110,7 +110,7 @@
 
 /// Returns true if the pointer is one which would have been considered an
 /// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) { - if (isa(V) || isa(V) || isa(V)) + if (isoneof(V)) return true; // The load case works because isNonEscapingLocalObject considers all @@ -283,7 +283,7 @@ // Since GEP indices are sign extended anyway, we don't care about the high // bits of a sign or zero extended value - just scales and offsets. The // extensions have to be consistent though. - if (isa(V) || isa(V)) { + if (isoneof(V)) { Value *CastOp = cast(V)->getOperand(0); unsigned NewWidth = V->getType()->getPrimitiveSizeInBits(); unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits(); @@ -1089,8 +1089,7 @@ // We need the object to be an alloca or a globalvariable, and want to know // the offset of the pointer from the object precisely, so no variable // indices are allowed. - if (!(isa(DecompObject.Base) || - isa(DecompObject.Base)) || + if (!isoneof(DecompObject.Base) || !DecompObject.VarIndices.empty()) return false; Index: lib/Analysis/CFLSteensAliasAnalysis.cpp =================================================================== --- lib/Analysis/CFLSteensAliasAnalysis.cpp +++ lib/Analysis/CFLSteensAliasAnalysis.cpp @@ -116,9 +116,8 @@ // TODO: Because all of these things are constant, we can determine whether // the data is *actually* mutable at graph building time. This will probably // come for free/cheap with offset awareness. - bool CanStoreMutableData = isa(Val) || - isa(Val) || - isa(Val); + bool CanStoreMutableData = + isoneof(Val); return !CanStoreMutableData; } Index: lib/Analysis/CodeMetrics.cpp =================================================================== --- lib/Analysis/CodeMetrics.cpp +++ lib/Analysis/CodeMetrics.cpp @@ -127,8 +127,7 @@ continue; // Special handling for calls. 
- if (isa(I) || isa(I)) { - ImmutableCallSite CS(&I); + if (ImmutableCallSite CS{&I}) { if (const Function *F = CS.getCalledFunction()) { // If a function is both internal and has a single use, then it is Index: lib/Analysis/ConstantFolding.cpp =================================================================== --- lib/Analysis/ConstantFolding.cpp +++ lib/Analysis/ConstantFolding.cpp @@ -136,7 +136,7 @@ // If this is a scalar -> vector cast, convert the input into a <1 x scalar> // vector so the code below can handle it uniformly. - if (isa(C) || isa(C)) { + if (isoneof(C)) { Constant *Ops = C; // don't take the address of C! return FoldBitCast(ConstantVector::get(Ops), DestTy, DL); } @@ -333,7 +333,7 @@ // If this element is zero or undefined, we can just return since *CurPtr is // zero initialized. - if (isa(C) || isa(C)) + if (isoneof(C)) return true; if (auto *CI = dyn_cast(C)) { @@ -407,8 +407,7 @@ // not reached. } - if (isa(C) || isa(C) || - isa(C)) { + if (isoneof(C)) { Type *EltTy = C->getType()->getSequentialElementType(); uint64_t EltSize = DL.getTypeAllocSize(EltTy); uint64_t Index = ByteOffset / EltSize; @@ -1053,7 +1052,7 @@ auto *NewC = cast(&NewU); // Recursively fold the ConstantExpr's operands. If we have already folded // a ConstantExpr, we don't have to process it again. - if (isa(NewC) || isa(NewC)) { + if (isoneof(NewC)) { auto It = FoldedOps.find(NewC); if (It == FoldedOps.end()) { if (auto *FoldedC = @@ -1743,8 +1742,7 @@ } // Support ConstantVector in case we have an Undef in the top. - if (isa(Operands[0]) || - isa(Operands[0])) { + if (isoneof(Operands[0])) { auto *Op = cast(Operands[0]); switch (IntrinsicID) { default: break; Index: lib/Analysis/CostModel.cpp =================================================================== --- lib/Analysis/CostModel.cpp +++ lib/Analysis/CostModel.cpp @@ -148,7 +148,7 @@ TargetTransformInfo::OK_AnyValue; // Check for a splat of a constant or for a non uniform vector of constants. 
- if (isa(V) || isa(V)) { + if (isoneof(V)) { OpInfo = TargetTransformInfo::OK_NonUniformConstantValue; if (cast(V)->getSplatValue() != nullptr) OpInfo = TargetTransformInfo::OK_UniformConstantValue; @@ -157,7 +157,7 @@ // Check for a splat of a uniform value. This is not loop aware, so return // true only for the obviously uniform cases (argument, globalvalue) const Value *Splat = getSplatValue(V); - if (Splat && (isa(Splat) || isa(Splat))) + if (Splat && isoneof(Splat)) OpInfo = TargetTransformInfo::OK_UniformValue; return OpInfo; Index: lib/Analysis/DemandedBits.cpp =================================================================== --- lib/Analysis/DemandedBits.cpp +++ lib/Analysis/DemandedBits.cpp @@ -67,8 +67,8 @@ } static bool isAlwaysLive(Instruction *I) { - return isa(I) || isa(I) || - I->isEHPad() || I->mayHaveSideEffects(); + return isoneof(I) || I->isEHPad() || + I->mayHaveSideEffects(); } void DemandedBits::determineLiveOperandBits( Index: lib/Analysis/DependenceAnalysis.cpp =================================================================== --- lib/Analysis/DependenceAnalysis.cpp +++ lib/Analysis/DependenceAnalysis.cpp @@ -166,10 +166,10 @@ auto *F = DA->getFunction(); for (inst_iterator SrcI = inst_begin(F), SrcE = inst_end(F); SrcI != SrcE; ++SrcI) { - if (isa(*SrcI) || isa(*SrcI)) { + if (isoneof(*SrcI)) { for (inst_iterator DstI = SrcI, DstE = inst_end(F); DstI != DstE; ++DstI) { - if (isa(*DstI) || isa(*DstI)) { + if (isoneof(*DstI)) { OS << "da analyze - "; if (auto D = DA->depends(&*SrcI, &*DstI, true)) { D->dump(OS); Index: lib/Analysis/GlobalsModRef.cpp =================================================================== --- lib/Analysis/GlobalsModRef.cpp +++ lib/Analysis/GlobalsModRef.cpp @@ -604,9 +604,8 @@ Inputs.push_back(V); do { const Value *Input = Inputs.pop_back_val(); - - if (isa(Input) || isa(Input) || isa(Input) || - isa(Input)) + + if (isoneof(Input)) // Arguments to functions or returns from functions are inherently // escaping, 
so we can immediately classify those as not aliasing any // non-addr-taken globals. @@ -718,8 +717,7 @@ return false; } - if (isa(Input) || isa(Input) || - isa(Input)) { + if (isoneof(Input)) { // Arguments to functions or returns from functions are inherently // escaping, so we can immediately classify those as not aliasing any // non-addr-taken globals. Index: lib/Analysis/InstructionSimplify.cpp =================================================================== --- lib/Analysis/InstructionSimplify.cpp +++ lib/Analysis/InstructionSimplify.cpp @@ -1265,7 +1265,7 @@ return true; // If all lanes of a vector shift are undefined the whole shift is. - if (isa(C) || isa(C)) { + if (isoneof(C)) { for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I) if (!isUndefShift(C->getAggregateElement(I))) return false; @@ -2118,8 +2118,7 @@ // // Note that it's not necessary to check for LHS being a global variable // address, due to canonicalization and constant folding. - if (isa(LHS) && - (isa(RHS) || isa(RHS))) { + if (isa(LHS) && isoneof(RHS)) { ConstantInt *LHSOffsetCI = dyn_cast(LHSOffset); ConstantInt *RHSOffsetCI = dyn_cast(RHSOffset); uint64_t LHSSize, RHSSize; @@ -3108,7 +3107,7 @@ } // Compare of cast, for example (zext X) != 0 -> X != 0 - if (isa(LHS) && (isa(RHS) || isa(RHS))) { + if (isa(LHS) && isoneof(RHS)) { Instruction *LI = cast(LHS); Value *SrcOp = LI->getOperand(0); Type *SrcTy = SrcOp->getType(); @@ -4450,7 +4449,7 @@ // call undef -> undef // call null -> undef - if (isa(V) || isa(V)) + if (isoneof(V)) return UndefValue::get(FTy->getReturnType()); Function *F = dyn_cast(V); Index: lib/Analysis/Loads.cpp =================================================================== --- lib/Analysis/Loads.cpp +++ lib/Analysis/Loads.cpp @@ -167,8 +167,7 @@ // this function is only used when one address use dominates the // other, which means that they'll always either have the same // value or one of them will have an undefined value. 
- if (isa(A) || isa(A) || isa(A) || - isa(A)) + if (isoneof(A)) if (const Instruction *BI = dyn_cast(B)) if (cast(A)->isIdenticalToWhenDefined(BI)) return true; @@ -397,8 +396,8 @@ // If both StrippedPtr and StorePtr reach all the way to an alloca or // global and they are different, ignore the store. This is a trivial form // of alias analysis that is important for reg2mem'd code. - if ((isa(StrippedPtr) || isa(StrippedPtr)) && - (isa(StorePtr) || isa(StorePtr)) && + if (isoneof(StrippedPtr) && + isoneof(StorePtr) && StrippedPtr != StorePtr) continue; Index: lib/Analysis/MemDepPrinter.cpp =================================================================== --- lib/Analysis/MemDepPrinter.cpp +++ lib/Analysis/MemDepPrinter.cpp @@ -117,8 +117,8 @@ } } else { SmallVector NLDI; - assert( (isa(Inst) || isa(Inst) || - isa(Inst)) && "Unknown memory instruction!"); + assert((isoneof(Inst)) && + "Unknown memory instruction!"); MDA.getNonLocalPointerDependency(Inst, NLDI); DepSet &InstDeps = Deps[Inst]; Index: lib/Analysis/MemoryBuiltins.cpp =================================================================== --- lib/Analysis/MemoryBuiltins.cpp +++ lib/Analysis/MemoryBuiltins.cpp @@ -768,8 +768,7 @@ } else if (isa(V) || (isa(V) && cast(V)->getOpcode() == Instruction::IntToPtr) || - isa(V) || - isa(V)) { + isoneof(V)) { // Ignore values where we cannot do more than ObjectSizeVisitor. Result = unknown(); } else { Index: lib/Analysis/MemoryDependenceAnalysis.cpp =================================================================== --- lib/Analysis/MemoryDependenceAnalysis.cpp +++ lib/Analysis/MemoryDependenceAnalysis.cpp @@ -413,7 +413,7 @@ // If we hit load/store with the same invariant.group metadata (and the // same pointer operand) we can assume that value pointed by pointer // operand didn't change. 
- if ((isa(U) || isa(U)) && + if (isoneof(U) && U->getMetadata(LLVMContext::MD_invariant_group) == InvariantGroupMD) ClosestDependency = GetClosestDependency(ClosestDependency, U); } @@ -744,8 +744,7 @@ LocalCache = getPointerDependencyFrom( MemLoc, isLoad, ScanPos->getIterator(), QueryParent, QueryInst); - } else if (isa(QueryInst) || isa(QueryInst)) { - CallSite QueryCS(QueryInst); + } else if (CallSite QueryCS{QueryInst}) { bool isReadOnly = AA.onlyReadsMemory(QueryCS); LocalCache = getCallSiteDependencyFrom( QueryCS, isReadOnly, ScanPos->getIterator(), QueryParent); Index: lib/Analysis/PHITransAddr.cpp =================================================================== --- lib/Analysis/PHITransAddr.cpp +++ lib/Analysis/PHITransAddr.cpp @@ -23,8 +23,7 @@ using namespace llvm; static bool CanPHITrans(Instruction *Inst) { - if (isa(Inst) || - isa(Inst)) + if (isoneof(Inst)) return true; if (isa(Inst) && @@ -36,7 +35,7 @@ return true; // cerr << "MEMDEP: Could not PHI translate: " << *Pointer; - // if (isa(PtrInst) || isa(PtrInst)) + // if (isoneof(PtrInst)) // cerr << "OP:\t\t\t\t" << *PtrInst->getOperand(0); return false; } Index: lib/Analysis/ProfileSummaryInfo.cpp =================================================================== --- lib/Analysis/ProfileSummaryInfo.cpp +++ lib/Analysis/ProfileSummaryInfo.cpp @@ -73,7 +73,7 @@ BlockFrequencyInfo *BFI) { if (!Inst) return None; - assert((isa(Inst) || isa(Inst)) && + assert((isoneof(Inst)) && "We can only get profile count for call/invoke instruction."); if (computeSummary() && Summary->getKind() == ProfileSummary::PSK_Sample) { // In sample PGO mode, check if there is a profile metadata on the @@ -114,7 +114,7 @@ uint64_t TotalCallCount = 0; for (const auto &BB : *F) for (const auto &I : BB) - if (isa(I) || isa(I)) + if (isoneof(I)) if (auto CallCount = getProfileCount(&I, nullptr)) TotalCallCount += CallCount.getValue(); return isHotCount(TotalCallCount); @@ -132,8 +132,8 @@ uint64_t TotalCallCount = 0; for 
(const auto &BB : *F) - for (const auto &I : BB) - if (isa(I) || isa(I)) + for (const auto &I : BB) + if (isoneof(I)) if (auto CallCount = getProfileCount(&I, nullptr)) TotalCallCount += CallCount.getValue(); return isColdCount(TotalCallCount); Index: lib/Analysis/ScalarEvolution.cpp =================================================================== --- lib/Analysis/ScalarEvolution.cpp +++ lib/Analysis/ScalarEvolution.cpp @@ -2289,7 +2289,7 @@ // Evaluate the expression in the larger type. const SCEV *Fold = getAddExpr(LargeOps, Flags, Depth + 1); // If it folds to something simple, use it. Otherwise, don't. - if (isa(Fold) || isa(Fold)) + if (isoneof(Fold)) return getTruncateExpr(Fold, DstType); } } @@ -2591,7 +2591,7 @@ if (isa(*CurrentExpr)) return true; - if (isa(*CurrentExpr) || isa(*CurrentExpr)) { + if (isoneof(*CurrentExpr)) { const auto *CurrentNAry = cast(CurrentExpr); Ops.append(CurrentNAry->op_begin(), CurrentNAry->op_end()); } @@ -6676,9 +6676,8 @@ /// Return true if we can constant fold an instruction of the specified type, /// assuming that all operands were constants. static bool CanConstantFold(const Instruction *I) { - if (isa(I) || isa(I) || - isa(I) || isa(I) || isa(I) || - isa(I)) + if (isoneof(I)) return true; if (const CallInst *CI = dyn_cast(I)) @@ -7602,7 +7601,7 @@ // Not all instructions that are "identical" compute the same value. For // instance, two distinct alloca instructions allocating the same type are // identical and do not read memory; but compute distinct values. 
- return A->isIdenticalTo(B) && (isa(A) || isa(A)); + return A->isIdenticalTo(B) && isoneof(A); }; // Otherwise, if they're both SCEVUnknown, it's possible that they hold @@ -9410,8 +9409,7 @@ : Terms(T) {} bool follow(const SCEV *S) { - if (isa(S) || isa(S) || - isa(S)) { + if (isoneof(S)) { if (!containsUndefs(S)) Terms.push_back(S); Index: lib/Analysis/ScalarEvolutionExpander.cpp =================================================================== --- lib/Analysis/ScalarEvolutionExpander.cpp +++ lib/Analysis/ScalarEvolutionExpander.cpp @@ -95,7 +95,7 @@ while (isa(IP)) ++IP; - if (isa(IP) || isa(IP)) { + if (isoneof(IP)) { ++IP; } else if (isa(IP)) { IP = MustDominate->getFirstInsertionPt(); @@ -2021,7 +2021,7 @@ // HowManyLessThans uses a Max expression whenever the loop is not guarded by // the exit condition. - if (isa(S) || isa(S)) + if (isoneof(S)) return true; // Recurse past nary expressions, which commonly occur in the Index: lib/Analysis/ValueTracking.cpp =================================================================== --- lib/Analysis/ValueTracking.cpp +++ lib/Analysis/ValueTracking.cpp @@ -1514,7 +1514,7 @@ return; } // Null and aggregate-zero are all-zeros. - if (isa(V) || isa(V)) { + if (isoneof(V)) { Known.setAllZero(); return; } @@ -1835,7 +1835,7 @@ return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q); // ext X != 0 if X != 0. - if (isa(V) || isa(V)) + if (isoneof(V)) return isKnownNonZero(cast(V)->getOperand(0), Depth, Q); // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined @@ -2553,7 +2553,7 @@ return true; // sitofp and uitofp turn into +0.0 for zero. 
- if (isa(I) || isa(I)) + if (isoneof(I)) return true; if (const CallInst *CI = dyn_cast(I)) { @@ -3418,7 +3418,7 @@ bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI, const DominatorTree *DT) { - if (isa(V) || isa(V)) + if (isoneof(V)) return false; if (isKnownNonNull(V)) Index: lib/Analysis/VectorUtils.cpp =================================================================== --- lib/Analysis/VectorUtils.cpp +++ lib/Analysis/VectorUtils.cpp @@ -328,13 +328,12 @@ for (auto &I : *BB) { InstructionSet.insert(&I); - if (TTI && (isa(&I) || isa(&I)) && + if (TTI && isoneof(&I) && !TTI->isTypeLegal(I.getOperand(0)->getType())) SeenExtFromIllegalType = true; // Only deal with non-vector integers up to 64-bits wide. - if ((isa(&I) || isa(&I)) && - !I.getType()->isVectorTy() && + if (isoneof(&I) && !I.getType()->isVectorTy() && I.getOperand(0)->getType()->getScalarSizeInBits() <= 64) { // Don't make work for ourselves. If we know the loaded type is legal, // don't add it to the worklist. @@ -374,14 +373,13 @@ // Casts, loads and instructions outside of our range terminate a chain // successfully. - if (isa(I) || isa(I) || isa(I) || - !InstructionSet.count(I)) + if (isoneof(I) || !InstructionSet.count(I)) continue; // Unsafe casts terminate a chain unsuccessfully. We can't do anything // useful with bitcasts, ptrtoints or inttoptrs and it'd be unsafe to // transform anything that relies on them. - if (isa(I) || isa(I) || isa(I) || + if (isoneof(I) || !I->getType()->isIntegerTy()) { DBits[Leader] |= ~0ULL; continue; Index: lib/Bitcode/Writer/ValueEnumerator.cpp =================================================================== --- lib/Bitcode/Writer/ValueEnumerator.cpp +++ lib/Bitcode/Writer/ValueEnumerator.cpp @@ -251,7 +251,7 @@ for (const BasicBlock &BB : F) for (const Instruction &I : BB) for (const Value *Op : I.operands()) - if (isa(*Op) || isa(*Op)) // Visit GlobalValues. + if (isoneof(*Op)) // Visit GlobalValues. 
predictValueUseListOrder(Op, &F, OM, Stack); for (const BasicBlock &BB : F) for (const Instruction &I : BB) @@ -634,9 +634,8 @@ if (!MD) return nullptr; - assert( - (isa(MD) || isa(MD) || isa(MD)) && - "Invalid metadata kind"); + assert((isoneof(MD)) && + "Invalid metadata kind"); auto Insertion = MetadataMap.insert(std::make_pair(MD, MDIndex(F))); MDIndex &Entry = Insertion.first->second; Index: lib/CodeGen/AsmPrinter/AsmPrinter.cpp =================================================================== --- lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -2380,7 +2380,7 @@ if (!BaseCV && CV->hasOneUse()) BaseCV = dyn_cast(CV->user_back()); - if (isa(CV) || isa(CV)) + if (isoneof(CV)) return AP.OutStreamer->EmitZeros(Size); if (const ConstantInt *CI = dyn_cast(CV)) { Index: lib/CodeGen/AsmPrinter/DwarfUnit.cpp =================================================================== --- lib/CodeGen/AsmPrinter/DwarfUnit.cpp +++ lib/CodeGen/AsmPrinter/DwarfUnit.cpp @@ -757,8 +757,7 @@ unsigned Flags = IsImplementation ? dwarf::DW_FLAG_type_implementation : 0; DD->addAccelType(Ty->getName(), TyDIE, Flags); - if (!Context || isa(Context) || isa(Context) || - isa(Context)) + if (!Context || isoneof(Context)) addGlobalType(Ty, TyDIE, Context); } } Index: lib/CodeGen/CodeGenPrepare.cpp =================================================================== --- lib/CodeGen/CodeGenPrepare.cpp +++ lib/CodeGen/CodeGenPrepare.cpp @@ -676,9 +676,7 @@ // such empty block (BB), ISel will place COPY instructions in BB, not in the // predecessor of BB. 
BasicBlock *Pred = BB->getUniquePredecessor(); - if (!Pred || - !(isa(Pred->getTerminator()) || - isa(Pred->getTerminator()))) + if (!Pred || !isoneof(Pred->getTerminator())) return true; if (BB->getTerminator() != BB->getFirstNonPHI()) @@ -3333,8 +3331,7 @@ TypePromotionHelper::Action TypePromotionHelper::getAction( Instruction *Ext, const SetOfInstrs &InsertedInsts, const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { - assert((isa(Ext) || isa(Ext)) && - "Unexpected instruction type"); + assert((isoneof(Ext)) && "Unexpected instruction type"); Instruction *ExtOpnd = dyn_cast(Ext->getOperand(0)); Type *ExtTy = Ext->getType(); bool IsSExt = isa(Ext); @@ -3352,8 +3349,7 @@ // SExt or Trunc instructions. // Return the related handler. - if (isa(ExtOpnd) || isa(ExtOpnd) || - isa(ExtOpnd)) + if (isoneof(ExtOpnd)) return promoteOperandForTruncAndAnyExt; // Regular instruction. @@ -4920,7 +4916,7 @@ if (UserBB == DefBB) continue; // Be conservative. We don't want this xform to end up introducing // reloads just before load / store instructions. - if (isa(UI) || isa(UI) || isa(UI)) + if (isoneof(UI)) return false; } @@ -5584,8 +5580,7 @@ // Moreover, one argument is a constant that can be viewed as a splat // constant. Value *Arg0 = Inst->getOperand(0); - bool IsArg0Constant = isa(Arg0) || isa(Arg0) || - isa(Arg0); + bool IsArg0Constant = isoneof(Arg0); TargetTransformInfo::OperandValueKind Arg0OVK = IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue : TargetTransformInfo::OK_AnyValue; @@ -5767,8 +5762,7 @@ Value *NewVal = nullptr; if (Val == Transition) NewVal = Transition->getOperand(getTransitionOriginalValueIdx()); - else if (isa(Val) || isa(Val) || - isa(Val)) { + else if (isoneof(Val)) { // Use a splat constant if it is not safe to use undef. 
NewVal = getConstantVector( cast(Val), @@ -5982,7 +5976,7 @@ if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL)) return true; - if (isa(I) || isa(I)) { + if (isoneof(I)) { /// Sink a zext or sext into its user blocks if the target type doesn't /// fit in one register if (TLI && Index: lib/CodeGen/GCRootLowering.cpp =================================================================== --- lib/CodeGen/GCRootLowering.cpp +++ lib/CodeGen/GCRootLowering.cpp @@ -135,8 +135,7 @@ // libcalls upon lowering (e.g., div i64 on a 32-bit platform), so instead // it is necessary to take a conservative approach. - if (isa(I) || isa(I) || isa(I) || - isa(I)) + if (isoneof(I)) return false; // llvm.gcroot is safe because it doesn't do anything at runtime. Index: lib/CodeGen/MachineFunction.cpp =================================================================== --- lib/CodeGen/MachineFunction.cpp +++ lib/CodeGen/MachineFunction.cpp @@ -918,8 +918,8 @@ if (A->getType() == B->getType()) return false; // We can't handle structs or arrays. - if (isa(A->getType()) || isa(A->getType()) || - isa(B->getType()) || isa(B->getType())) + if (isoneof(A->getType()) || + isoneof(B->getType())) return false; // For now, only support constants with the same size. Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp =================================================================== --- lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -8524,7 +8524,7 @@ } // If the input is a constant, let getNode fold it. - if (isa(N0) || isa(N0)) { + if (isoneof(N0)) { // If we can't allow illegal operations, we need to check that this is just // a fp -> int or int -> conversion and that the resulting operation will // be legal. @@ -10924,7 +10924,7 @@ // Check #1. Preinc'ing a frame index would require copying the stack pointer // (plus the implicit offset) to a register to preinc anyway. - if (isa(BasePtr) || isa(BasePtr)) + if (isoneof(BasePtr)) return false; // Check #2. 
@@ -11138,7 +11138,7 @@ // nor a successor of N. Otherwise, if Op is folded that would // create a cycle. - if (isa(BasePtr) || isa(BasePtr)) + if (isoneof(BasePtr)) continue; // Check for #1. @@ -12432,8 +12432,8 @@ return; bool IsLoadSrc = isa(St->getValue()); - bool IsConstantSrc = isa(St->getValue()) || - isa(St->getValue()); + bool IsConstantSrc = + isoneof(St->getValue()); bool IsExtractVecSrc = (St->getValue().getOpcode() == ISD::EXTRACT_VECTOR_ELT || St->getValue().getOpcode() == ISD::EXTRACT_SUBVECTOR); @@ -12449,8 +12449,7 @@ if (!isa(Other->getValue())) return false; if (IsConstantSrc) - if (!(isa(Other->getValue()) || - isa(Other->getValue()))) + if (!isoneof(Other->getValue())) return false; if (IsExtractVecSrc) if (!(Other->getValue().getOpcode() == ISD::EXTRACT_VECTOR_ELT || @@ -12546,8 +12545,7 @@ // are not constants, loads, or extracted vector elements. SDValue StoredVal = St->getValue(); bool IsLoadSrc = isa(StoredVal); - bool IsConstantSrc = isa(StoredVal) || - isa(StoredVal); + bool IsConstantSrc = isoneof(StoredVal); bool IsExtractVecSrc = (StoredVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT || StoredVal.getOpcode() == ISD::EXTRACT_SUBVECTOR); Index: lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp =================================================================== --- lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp +++ lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp @@ -171,8 +171,7 @@ } // Look for inline asm that clobbers the SP register. 
- if (isa(I) || isa(I)) { - ImmutableCallSite CS(&I); + if (ImmutableCallSite CS{&I}) { if (isa(CS.getCalledValue())) { unsigned SP = TLI->getStackPointerRegisterToSaveRestore(); const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); @@ -405,7 +404,7 @@ LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg]; Value *V = PN->getIncomingValue(0); - if (isa(V) || isa(V)) { + if (isoneof(V)) { DestLOI.NumSignBits = 1; DestLOI.Known = KnownBits(BitWidth); return; @@ -438,7 +437,7 @@ for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) { Value *V = PN->getIncomingValue(i); - if (isa(V) || isa(V)) { + if (isoneof(V)) { DestLOI.NumSignBits = 1; DestLOI.Known = KnownBits(BitWidth); return; Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp =================================================================== --- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -995,7 +995,7 @@ // If we already have an SDValue for this value, use it. SDValue &N = NodeMap[V]; if (N.getNode()) { - if (isa(N) || isa(N)) { + if (isoneof(N)) { // Remove the debug location from the node as the node is about to be used // in a location which may differ from the original debug location. This // is relevant to Constant and ConstantFP nodes because they can appear @@ -1045,7 +1045,7 @@ return N1; } - if (isa(C) || isa(C)) { + if (isoneof(C)) { SmallVector Constants; for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end(); OI != OE; ++OI) { @@ -1078,7 +1078,7 @@ } if (C->getType()->isStructTy() || C->getType()->isArrayTy()) { - assert((isa(C) || isa(C)) && + assert((isoneof(C)) && "Unknown struct or array constant!"); SmallVector ValueVTs; @@ -1613,9 +1613,8 @@ } // If this node is not part of the or/and tree, emit it as a branch. 
- if (!BOp || !(isa(BOp) || isa(BOp)) || - BOpc != Opc || !BOp->hasOneUse() || - BOp->getParent() != CurBB->getBasicBlock() || + if (!BOp || !isoneof(BOp) || BOpc != Opc || + !BOp->hasOneUse() || BOp->getParent() != CurBB->getBasicBlock() || !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) || !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) { EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, @@ -4982,7 +4981,7 @@ return nullptr; SDDbgValue *SDV; - if (isa(V) || isa(V) || isa(V)) { + if (isoneof(V)) { SDV = DAG.getConstantDbgValue(Variable, Expression, V, Offset, dl, SDNodeOrder); DAG.AddDbgValue(SDV, nullptr, false); @@ -6631,8 +6630,8 @@ // If the operand is a float, integer, or vector constant, spill to a // constant pool entry to get its address. const Value *OpVal = OpInfo.CallOperandVal; - if (isa(OpVal) || isa(OpVal) || - isa(OpVal) || isa(OpVal)) { + if (isoneof( + OpVal)) { OpInfo.CallOperand = DAG.getConstantPool( cast(OpVal), TLI.getPointerTy(DAG.getDataLayout())); return Chain; Index: lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp =================================================================== --- lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -439,7 +439,7 @@ continue; const TerminatorInst *Term = BB.getTerminator(); - if (isa(Term) || isa(Term)) + if (isoneof(Term)) continue; // Bail out if the exit block is not Return nor Unreachable. 
Index: lib/CodeGen/SelectionDAG/StatepointLowering.cpp =================================================================== --- lib/CodeGen/SelectionDAG/StatepointLowering.cpp +++ lib/CodeGen/SelectionDAG/StatepointLowering.cpp @@ -203,7 +203,7 @@ SDValue Incoming = Builder.getValue(IncomingValue); - if (isa(Incoming) || isa(Incoming)) { + if (isoneof(Incoming)) { // We won't need to spill this, so no need to check for previously // allocated stack slots return; Index: lib/CodeGen/SelectionDAG/TargetLowering.cpp =================================================================== --- lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -2913,7 +2913,7 @@ // that matches labels). For Functions, the type here is the type of // the result, which is not what we want to look at; leave them alone. Value *v = OpInfo.CallOperandVal; - if (isa(v) || isa(v) || isa(v)) { + if (isoneof(v)) { OpInfo.CallOperandVal = v; return; } Index: lib/IR/AsmWriter.cpp =================================================================== --- lib/IR/AsmWriter.cpp +++ lib/IR/AsmWriter.cpp @@ -242,7 +242,7 @@ for (const BasicBlock &BB : F) for (const Instruction &I : BB) for (const Value *Op : I.operands()) - if (isa(*Op) || isa(*Op)) // Visit GlobalValues. + if (isoneof(*Op)) // Visit GlobalValues. predictValueUseListOrder(Op, &F, OM, Stack); for (const BasicBlock &BB : F) for (const Instruction &I : BB) @@ -1279,7 +1279,7 @@ return; } - if (isa(CV) || isa(CV)) { + if (isoneof(CV)) { Type *ETy = CV->getType()->getVectorElementType(); Out << '<'; TypePrinter.print(ETy, Out); @@ -3146,8 +3146,7 @@ Type *TheType = Operand->getType(); // Select, Store and ShuffleVector always print all types. 
- if (isa(I) || isa(I) || isa(I) - || isa(I)) { + if (isoneof(I)) { PrintAllTypes = true; } else { for (unsigned i = 1, E = I.getNumOperands(); i != E; ++i) { @@ -3441,7 +3440,7 @@ TypePrinter.print(C->getType(), OS); OS << ' '; WriteConstantInternal(OS, C, TypePrinter, MST.getMachine(), nullptr); - } else if (isa(this) || isa(this)) { + } else if (isoneof(this)) { this->printAsOperand(OS, /* PrintType */ true, MST); } else { llvm_unreachable("Unknown value to print out!"); Index: lib/IR/BasicBlock.cpp =================================================================== --- lib/IR/BasicBlock.cpp +++ lib/IR/BasicBlock.cpp @@ -184,7 +184,7 @@ const Instruction* BasicBlock::getFirstNonPHIOrDbgOrLifetime() const { for (const Instruction &I : *this) { - if (isa(I) || isa(I)) + if (isoneof(I)) continue; if (auto *II = dyn_cast(&I)) Index: lib/IR/ConstantFold.cpp =================================================================== --- lib/IR/ConstantFold.cpp +++ lib/IR/ConstantFold.cpp @@ -152,7 +152,7 @@ // Canonicalize scalar-to-vector bitcasts into vector-to-vector bitcasts // This allows for other simplifications (although some of them // can only be handled by Analysis/ConstantFolding.cpp). - if (isa(V) || isa(V)) + if (isoneof(V)) return ConstantExpr::getBitCast(ConstantVector::get(V), DestPTy); } @@ -566,7 +566,7 @@ // If the cast operand is a constant vector, perform the cast by // operating on each element. In the cast of bitcasts, the element // count may be mismatched; don't attempt to handle that here. - if ((isa(V) || isa(V)) && + if ((isoneof(V)) && DestTy->isVectorTy() && DestTy->getVectorNumElements() == V->getType()->getVectorNumElements()) { SmallVector res; @@ -1511,7 +1511,7 @@ return ICmpInst::ICMP_NE; } else { // Block addresses aren't null, don't equal the address of globals. 
- assert((isa(V2) || isa(V2)) && + assert((isoneof(V2)) && "Canonicalization guarantee!"); return ICmpInst::ICMP_NE; } Index: lib/IR/Constants.cpp =================================================================== --- lib/IR/Constants.cpp +++ lib/IR/Constants.cpp @@ -98,8 +98,8 @@ // constant zero is zero for aggregates, cpnull is null for pointers, none for // tokens. - return isa(this) || isa(this) || - isa(this); + return isoneof( + this); } bool Constant::isAllOnesValue() const { @@ -1016,7 +1016,7 @@ Constant *ConstantVector::getSplat(unsigned NumElts, Constant *V) { // If this splat is compatible with ConstantDataVector, use it instead of // ConstantVector. - if ((isa(V) || isa(V)) && + if (isoneof(V) && ConstantDataSequential::isElementTypeCompatible(V->getType())) return ConstantDataVector::getSplat(NumElts, V); Index: lib/IR/Core.cpp =================================================================== --- lib/IR/Core.cpp +++ lib/IR/Core.cpp @@ -681,8 +681,8 @@ // This undoes this canonicalization, reconstructing the MDNode. 
static MDNode *extractMDNode(MetadataAsValue *MAV) { Metadata *MD = MAV->getMetadata(); - assert((isa(MD) || isa(MD)) && - "Expected a metadata node or a canonicalized constant"); + assert((isoneof(MD)) && + "Expected a metadata node or a canonicalized constant"); if (MDNode *N = dyn_cast(MD)) return N; @@ -707,8 +707,7 @@ LLVMValueRef LLVMIsAMDNode(LLVMValueRef Val) { if (auto *MD = dyn_cast_or_null(unwrap(Val))) - if (isa(MD->getMetadata()) || - isa(MD->getMetadata())) + if (isoneof(MD->getMetadata())) return Val; return nullptr; } Index: lib/IR/DebugInfoMetadata.cpp =================================================================== --- lib/IR/DebugInfoMetadata.cpp +++ lib/IR/DebugInfoMetadata.cpp @@ -136,8 +136,7 @@ if (auto *M = dyn_cast(this)) return M->getScope(); - assert((isa(this) || isa(this)) && - "Unhandled type of scope."); + assert((isoneof(this)) && "Unhandled type of scope."); return nullptr; } @@ -150,8 +149,7 @@ return NS->getName(); if (auto *M = dyn_cast(this)) return M->getName(); - assert((isa(this) || isa(this) || - isa(this)) && + assert((isoneof(this)) && "Unhandled type of scope."); return ""; } Index: lib/IR/DiagnosticInfo.cpp =================================================================== --- lib/IR/DiagnosticInfo.cpp +++ lib/IR/DiagnosticInfo.cpp @@ -192,7 +192,7 @@ // Only include names that correspond to user variables. FIXME: we should use // debug info if available to get the name of the user variable. 
- if (isa(V) || isa(V)) + if (isoneof(V)) Val = GlobalValue::getRealLinkageName(V->getName()); else if (isa(V)) { raw_string_ostream OS(Val); Index: lib/IR/Instruction.cpp =================================================================== --- lib/IR/Instruction.cpp +++ lib/IR/Instruction.cpp @@ -687,7 +687,7 @@ } void Instruction::setProfWeight(uint64_t W) { - assert((isa(this) || isa(this)) && + assert((isoneof(this)) && "Can only set weights for call and invoke instrucitons"); SmallVector Weights; Weights.push_back(W); Index: lib/IR/Instructions.cpp =================================================================== --- lib/IR/Instructions.cpp +++ lib/IR/Instructions.cpp @@ -1911,7 +1911,7 @@ return false; // Check to see if Mask is valid. - if (isa(Mask) || isa(Mask)) + if (isoneof(Mask)) return true; if (const auto *MV = dyn_cast(Mask)) { Index: lib/IR/Metadata.cpp =================================================================== --- lib/IR/Metadata.cpp +++ lib/IR/Metadata.cpp @@ -342,7 +342,7 @@ auto &Context = V->getContext(); auto *&Entry = Context.pImpl->ValuesAsMetadata[V]; if (!Entry) { - assert((isa(V) || isa(V) || isa(V)) && + assert((isoneof(V)) && "Expected constant or function-local value"); assert(!V->IsUsedByMD && "Expected this to be the only metadata use"); V->IsUsedByMD = true; Index: lib/IR/Verifier.cpp =================================================================== --- lib/IR/Verifier.cpp +++ lib/IR/Verifier.cpp @@ -642,8 +642,7 @@ Init); for (Value *Op : InitArray->operands()) { Value *V = Op->stripPointerCastsNoFollowAliases(); - Assert(isa(V) || isa(V) || - isa(V), + Assert((isoneof(V)), "invalid llvm.used member", V); Assert(V->hasName(), "members of llvm.used must be named", V); } @@ -724,7 +723,7 @@ Assert(GA.getType() == Aliasee->getType(), "Alias and aliasee types should match!", &GA); - Assert(isa(Aliasee) || isa(Aliasee), + Assert((isoneof(Aliasee)), "Aliasee should be either GlobalValue or ConstantExpr", &GA); 
visitAliaseeSubExpr(GA, *Aliasee); @@ -1848,7 +1847,7 @@ const CallInst *Call = dyn_cast(U); Assert(Call, "illegal use of statepoint token", &CI, U); if (!Call) continue; - Assert(isa(Call) || isa(Call), + Assert((isoneof(Call)), "gc.result or gc.relocate are the only value uses " "of a gc.statepoint", &CI, U); @@ -3154,8 +3153,7 @@ // Check that swifterror value is only used by loads, stores, or as // a swifterror argument. for (const User *U : SwiftErrorVal->users()) { - Assert(isa(U) || isa(U) || isa(U) || - isa(U), + Assert((isoneof(U)), "swifterror value can only be loaded and stored from, or " "as a swifterror argument!", SwiftErrorVal, U); @@ -3388,7 +3386,7 @@ "Catch operand does not have pointer type!", &LPI); } else { Assert(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI); - Assert(isa(Clause) || isa(Clause), + Assert((isoneof(Clause)), "Filter operand is not an array of constants!", &LPI); } } @@ -3453,7 +3451,7 @@ &CPI); auto *ParentPad = CPI.getParentPad(); - Assert(isa(ParentPad) || isa(ParentPad), + Assert((isoneof(ParentPad)), "CleanupPadInst has an invalid parent.", &CPI); visitEHPadPredecessors(CPI); @@ -3634,7 +3632,7 @@ &CatchSwitch); auto *ParentPad = CatchSwitch.getParentPad(); - Assert(isa(ParentPad) || isa(ParentPad), + Assert((isoneof(ParentPad)), "CatchSwitchInst has an invalid parent.", ParentPad); if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) { @@ -3739,7 +3737,7 @@ // Check that the instruction doesn't produce metadata. Calls are already // checked against the callee type. 
- Assert(!I.getType()->isMetadataTy() || isa(I) || isa(I), + Assert((!I.getType()->isMetadataTy() || isoneof(I)), "Invalid use of metadata!", &I); // Check that all uses of the instruction, if they are instructions @@ -3830,7 +3828,7 @@ } if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) { - Assert(isa(I) || isa(I) || isa(I), + Assert((isoneof(I)), "Ranges are only for loads, calls and invokes!", &I); visitRangeMetadata(I, Range, I.getType()); } @@ -3944,9 +3942,9 @@ "info argument of llvm.coro.begin must refer to an initialized " "constant"); Constant *Init = GV->getInitializer(); - Assert(isa(Init) || isa(Init), - "info argument of llvm.coro.begin must refer to either a struct or " - "an array"); + Assert((isoneof(Init)), + "info argument of llvm.coro.begin must refer to either a struct or " + "an array"); break; } case Intrinsic::ctlz: // llvm.ctlz @@ -4785,9 +4783,8 @@ } bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) { - AssertTBAA(isa(I) || isa(I) || isa(I) || - isa(I) || isa(I) || - isa(I), + AssertTBAA((isoneof(I)), "TBAA is only for loads, stores and calls!", &I); bool IsStructPathTBAA = Index: lib/MC/MCExpr.cpp =================================================================== --- lib/MC/MCExpr.cpp +++ lib/MC/MCExpr.cpp @@ -81,7 +81,7 @@ const MCBinaryExpr &BE = cast(*this); // Only print parens around the LHS if it is non-trivial. - if (isa(BE.getLHS()) || isa(BE.getLHS())) { + if (isoneof(BE.getLHS())) { BE.getLHS()->print(OS, MAI); } else { OS << '('; @@ -122,7 +122,7 @@ } // Only print parens around the LHS if it is non-trivial. 
- if (isa(BE.getRHS()) || isa(BE.getRHS())) { + if (isoneof(BE.getRHS())) { BE.getRHS()->print(OS, MAI); } else { OS << '('; Index: lib/Target/AArch64/AArch64FastISel.cpp =================================================================== --- lib/Target/AArch64/AArch64FastISel.cpp +++ lib/Target/AArch64/AArch64FastISel.cpp @@ -307,7 +307,7 @@ /// \brief Check if the sign-/zero-extend will be a noop. static bool isIntExtFree(const Instruction *I) { - assert((isa(I) || isa(I)) && + assert((isoneof(I)) && "Unexpected integer extend instruction."); assert(!I->getType()->isVectorTy() && I->getType()->isIntegerTy() && "Unexpected value type."); @@ -4475,7 +4475,7 @@ } bool AArch64FastISel::selectIntExt(const Instruction *I) { - assert((isa(I) || isa(I)) && + assert((isoneof(I)) && "Unexpected integer extend instruction."); MVT RetVT; MVT SrcVT; Index: lib/Target/AArch64/AArch64ISelDAGToDAG.cpp =================================================================== --- lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -863,7 +863,7 @@ // We don't want to match immediate adds here, because they are better lowered // to the register-immediate addressing modes. 
- if (isa(LHS) || isa(RHS)) + if (isoneof(RHS)) return false; // Check if this particular node is reused in any non-memory related Index: lib/Target/AArch64/AArch64ISelLowering.cpp =================================================================== --- lib/Target/AArch64/AArch64ISelLowering.cpp +++ lib/Target/AArch64/AArch64ISelLowering.cpp @@ -6646,10 +6646,10 @@ continue; if (i > 0) isOnlyLowElement = false; - if (!isa(V) && !isa(V)) + if (!isoneof(V)) isConstant = false; - if (isa(V) || isa(V)) { + if (isoneof(V)) { ++NumConstantLanes; if (!ConstantValue.getNode()) ConstantValue = V; Index: lib/Target/AArch64/AArch64PromoteConstant.cpp =================================================================== --- lib/Target/AArch64/AArch64PromoteConstant.cpp +++ lib/Target/AArch64/AArch64PromoteConstant.cpp @@ -542,7 +542,7 @@ // There is no point in promoting global values as they are already // global. Do not promote constant expressions either, as they may // require some code expansion. - if (!Cst || isa(Cst) || isa(Cst)) + if (!Cst || isoneof(Cst)) continue; // Check if this constant is worth promoting. Index: lib/Target/AArch64/AArch64TargetTransformInfo.cpp =================================================================== --- lib/Target/AArch64/AArch64TargetTransformInfo.cpp +++ lib/Target/AArch64/AArch64TargetTransformInfo.cpp @@ -210,8 +210,7 @@ // second operand must be a sign- or zero extend having a single user. We // only consider extends having a single user because they may otherwise not // be eliminated. 
- if (Args.size() != 2 || - (!isa(Args[1]) && !isa(Args[1])) || + if (Args.size() != 2 || !isoneof(Args[1]) || !Args[1]->hasOneUse()) return false; auto *Extend = cast(Args[1]); Index: lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp +++ lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp @@ -141,7 +141,7 @@ bool NotClobbered = isKernelFunc && !isClobberedInFunction(&I); Instruction *PtrI = dyn_cast(Ptr); if (!PtrI && NotClobbered && isGlobalLoad(I)) { - if (isa(Ptr) || isa(Ptr)) { + if (isoneof(Ptr)) { // Lookup for the existing GEP if (noClobberClones.count(Ptr)) { PtrI = noClobberClones[Ptr]; Index: lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp +++ lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp @@ -223,8 +223,7 @@ // Sometimes LDS instructions have constant pointers. // If Ptr is null, then that means this mem operand contains a // PseudoSourceValue like GOT. - if (!Ptr || isa(Ptr) || isa(Ptr) || - isa(Ptr) || isa(Ptr)) + if (!Ptr || isoneof(Ptr)) return true; const Instruction *I = dyn_cast(Ptr); Index: lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp +++ lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp @@ -476,7 +476,7 @@ // atomic operation refers to the same address in each thread, then each // thread after the first sees the value written by the previous thread as // original value. - if (isa(V) || isa(V)) + if (isoneof(V)) return true; if (const IntrinsicInst *Intrinsic = dyn_cast(V)) @@ -483,7 +483,7 @@ return isIntrinsicSourceOfDivergence(Intrinsic); // Assume all function calls are a source of divergence. 
- if (isa(V) || isa(V)) + if (isoneof(V)) return true; return false; Index: lib/Target/AMDGPU/R600ISelLowering.cpp =================================================================== --- lib/Target/AMDGPU/R600ISelLowering.cpp +++ lib/Target/AMDGPU/R600ISelLowering.cpp @@ -1430,8 +1430,8 @@ ((LoadNode->getExtensionType() == ISD::NON_EXTLOAD) || (LoadNode->getExtensionType() == ISD::ZEXTLOAD))) { SDValue Result; - if (isa(LoadNode->getMemOperand()->getValue()) || - isa(LoadNode->getMemOperand()->getValue()) || + if (isoneof( + LoadNode->getMemOperand()->getValue()) || isa(Ptr)) { SDValue Slots[4]; for (unsigned i = 0; i < 4; i++) { Index: lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp =================================================================== --- lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp +++ lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp @@ -724,8 +724,7 @@ // Sometimes LDS instructions have constant pointers. // If Ptr is null, then that means this mem operand contains a // PseudoSourceValue like GOT. - if (!Ptr || isa(Ptr) || isa(Ptr) || - isa(Ptr) || isa(Ptr)) + if (!Ptr || isoneof(Ptr)) return true; const Instruction *I = dyn_cast(Ptr); Index: lib/Target/AVR/AVRISelDAGToDAG.cpp =================================================================== --- lib/Target/AVR/AVRISelDAGToDAG.cpp +++ lib/Target/AVR/AVRISelDAGToDAG.cpp @@ -328,8 +328,7 @@ SDValue BasePtr = ST->getBasePtr(); // Early exit when the base pointer is a frame index node or a constant. 
- if (isa(BasePtr) || isa(BasePtr) || - BasePtr.isUndef()) { + if (isoneof(BasePtr) || BasePtr.isUndef()) { return false; } Index: lib/Target/Hexagon/HexagonCommonGEP.cpp =================================================================== --- lib/Target/Hexagon/HexagonCommonGEP.cpp +++ lib/Target/Hexagon/HexagonCommonGEP.cpp @@ -820,7 +820,7 @@ } bool HexagonCommonGEP::isInvariantIn(Value *Val, Loop *L) { - if (isa(Val) || isa(Val)) + if (isoneof(Val)) return true; Instruction *In = dyn_cast(Val); if (!In) @@ -1258,7 +1258,7 @@ // For now bail out on C++ exception handling. for (Function::iterator A = F.begin(), Z = F.end(); A != Z; ++A) for (BasicBlock::iterator I = A->begin(), E = A->end(); I != E; ++I) - if (isa(I) || isa(I)) + if (isoneof(I)) return false; Fn = &F; Index: lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp =================================================================== --- lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp +++ lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp @@ -2223,7 +2223,7 @@ // instructions in it that are not involved in the original set Insts. 
for (auto *B : L->blocks()) { for (auto &In : *B) { - if (isa(In) || isa(In)) + if (isoneof(In)) continue; if (!Worklist.count(&In) && In.mayHaveSideEffects()) return false; Index: lib/Target/Mips/Mips16ISelDAGToDAG.cpp =================================================================== --- lib/Target/Mips/Mips16ISelDAGToDAG.cpp +++ lib/Target/Mips/Mips16ISelDAGToDAG.cpp @@ -152,8 +152,8 @@ if (Addr.getOperand(1).getOpcode() == MipsISD::Lo || Addr.getOperand(1).getOpcode() == MipsISD::GPRel) { SDValue Opnd0 = Addr.getOperand(1).getOperand(0); - if (isa(Opnd0) || isa(Opnd0) || - isa(Opnd0)) { + if (isoneof( + Opnd0)) { Base = Addr.getOperand(0); Offset = Opnd0; return true; Index: lib/Target/Mips/MipsSEISelDAGToDAG.cpp =================================================================== --- lib/Target/Mips/MipsSEISelDAGToDAG.cpp +++ lib/Target/Mips/MipsSEISelDAGToDAG.cpp @@ -367,8 +367,8 @@ if (Addr.getOperand(1).getOpcode() == MipsISD::Lo || Addr.getOperand(1).getOpcode() == MipsISD::GPRel) { SDValue Opnd0 = Addr.getOperand(1).getOperand(0); - if (isa(Opnd0) || isa(Opnd0) || - isa(Opnd0)) { + if (isoneof( + Opnd0)) { Base = Addr.getOperand(0); Offset = Opnd0; return true; Index: lib/Target/NVPTX/NVPTXAsmPrinter.cpp =================================================================== --- lib/Target/NVPTX/NVPTXAsmPrinter.cpp +++ lib/Target/NVPTX/NVPTXAsmPrinter.cpp @@ -1976,7 +1976,7 @@ case Type::ArrayTyID: case Type::VectorTyID: case Type::StructTyID: { - if (isa(CPV) || isa(CPV)) { + if (isoneof(CPV)) { int ElementSize = DL.getTypeAllocSize(CPV->getType()); bufferAggregateConstant(CPV, aggBuffer); if (Bytes > ElementSize) @@ -2010,7 +2010,7 @@ } // Old constants - if (isa(CPV) || isa(CPV)) { + if (isoneof(CPV)) { if (CPV->getNumOperands()) for (unsigned i = 0, e = CPV->getNumOperands(); i != e; ++i) bufferLEByte(cast(CPV->getOperand(i)), 0, aggBuffer); @@ -2271,8 +2271,8 @@ const MCBinaryExpr &BE = cast(Expr); // Only print parens around the LHS if it is 
non-trivial. - if (isa(BE.getLHS()) || isa(BE.getLHS()) || - isa(BE.getLHS())) { + if (isoneof( + BE.getLHS())) { printMCExpr(*BE.getLHS(), OS); } else { OS << '('; @@ -2296,7 +2296,7 @@ } // Only print parens around the LHS if it is non-trivial. - if (isa(BE.getRHS()) || isa(BE.getRHS())) { + if (isoneof(BE.getRHS())) { printMCExpr(*BE.getRHS(), OS); } else { OS << '('; Index: lib/Target/PowerPC/PPCBoolRetToInt.cpp =================================================================== --- lib/Target/PowerPC/PPCBoolRetToInt.cpp +++ lib/Target/PowerPC/PPCBoolRetToInt.cpp @@ -134,13 +134,11 @@ SmallVector ToRemove; for (const PHINode *P : Promotable) { // Condition 2 and 3 - auto IsValidUser = [] (const Value *V) -> bool { - return isa(V) || isa(V) || isa(V) || - isa(V); + auto IsValidUser = [](const Value *V) -> bool { + return isoneof(V); }; - auto IsValidOperand = [] (const Value *V) -> bool { - return isa(V) || isa(V) || isa(V) || - isa(V); + auto IsValidOperand = [](const Value *V) -> bool { + return isoneof(V); }; const auto &Users = P->users(); const auto &Operands = P->operands(); Index: lib/Target/PowerPC/PPCCTRLoops.cpp =================================================================== --- lib/Target/PowerPC/PPCCTRLoops.cpp +++ lib/Target/PowerPC/PPCCTRLoops.cpp @@ -401,8 +401,7 @@ J->getType()->getScalarType()->isPPC_FP128Ty()) { // Most operations on ppc_f128 values become calls. return true; - } else if (isa(J) || isa(J) || - isa(J) || isa(J)) { + } else if (isoneof(J)) { CastInst *CI = cast(J); if (CI->getSrcTy()->getScalarType()->isPPC_FP128Ty() || CI->getDestTy()->getScalarType()->isPPC_FP128Ty() || @@ -424,7 +423,7 @@ // Only on PPC32, for 128-bit integers (specifically not 64-bit // integers), these might be runtime calls. return true; - } else if (isa(J) || isa(J)) { + } else if (isoneof(J)) { // On PowerPC, indirect jumps use the counter register. 
return true; } else if (SwitchInst *SI = dyn_cast(J)) { Index: lib/Target/PowerPC/PPCISelDAGToDAG.cpp =================================================================== --- lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -3360,7 +3360,7 @@ SDNode *Tmp = CurDAG->getMachineNode(PPC::ADDIStocHA, dl, MVT::i64, TOCbase, GA); - if (isa(GA) || isa(GA) || + if (isoneof(GA) || CModel == CodeModel::Large) { SDNode *MN = CurDAG->getMachineNode(PPC::LDtocL, dl, MVT::i64, GA, SDValue(Tmp, 0)); Index: lib/Target/PowerPC/PPCISelLowering.cpp =================================================================== --- lib/Target/PowerPC/PPCISelLowering.cpp +++ lib/Target/PowerPC/PPCISelLowering.cpp @@ -2077,7 +2077,7 @@ // those situations here, and try with swapped Base/Offset instead. bool Swap = false; - if (isa(Base) || isa(Base)) + if (isoneof(Base)) Swap = true; else if (!isLoad) { SDValue Val = cast(N)->getValue(); Index: lib/Target/SystemZ/SystemZISelLowering.cpp =================================================================== --- lib/Target/SystemZ/SystemZISelLowering.cpp +++ lib/Target/SystemZ/SystemZISelLowering.cpp @@ -552,7 +552,7 @@ // values (vector load / store instructions only support small // offsets). - assert (isa(I) || isa(I)); + assert((isoneof(I))); Type *MemAccessTy = (isa(I) ? 
I->getType() : I->getOperand(0)->getType()); bool IsFPAccess = MemAccessTy->isFloatingPointTy(); Index: lib/Target/SystemZ/SystemZTargetTransformInfo.cpp =================================================================== --- lib/Target/SystemZ/SystemZTargetTransformInfo.cpp +++ lib/Target/SystemZ/SystemZTargetTransformInfo.cpp @@ -246,8 +246,7 @@ unsigned NumStores = 0; for (auto &BB : L->blocks()) for (auto &I : *BB) { - if (isa(&I) || isa(&I)) { - ImmutableCallSite CS(&I); + if (ImmutableCallSite CS{&I}) { if (const Function *F = CS.getCalledFunction()) { if (isLoweredToCall(F)) HasCall = true; Index: lib/Target/X86/X86FastISel.cpp =================================================================== --- lib/Target/X86/X86FastISel.cpp +++ lib/Target/X86/X86FastISel.cpp @@ -3378,7 +3378,7 @@ SrcAM.Base.Reg = ArgReg; if (!TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize())) return false; - } else if (isa(ArgVal) || isa(ArgVal)) { + } else if (isoneof(ArgVal)) { // If this is a really simple value, emit this with the Value* version // of X86FastEmitStore. If it isn't simple, we don't want to do this, // as it can cause us to reevaluate the argument. Index: lib/Transforms/Coroutines/CoroFrame.cpp =================================================================== --- lib/Transforms/Coroutines/CoroFrame.cpp +++ lib/Transforms/Coroutines/CoroFrame.cpp @@ -550,15 +550,14 @@ // Check for instructions that we can recreate on resume as opposed to spill // the result into a coroutine frame. static bool materializable(Instruction &V) { - return isa(&V) || isa(&V) || - isa(&V) || isa(&V) || isa(&V); + return isoneof(&V); } // Check for structural coroutine intrinsics that should not be spilled into // the coroutine frame. 
static bool isCoroutineStructureIntrinsic(Instruction &I) { - return isa(&I) || isa(&I) || - isa(&I) || isa(&I); + return isoneof(&I); } // For every use of the value that is across suspend point, recreate that value Index: lib/Transforms/Coroutines/CoroInstr.h =================================================================== --- lib/Transforms/Coroutines/CoroInstr.h +++ lib/Transforms/Coroutines/CoroInstr.h @@ -111,7 +111,7 @@ ConstantPointerNull::get(Type::getInt8PtrTy(getContext()))); if (isa(Arg)) return; - assert((isa(Arg) || isa(Arg)) && + assert((isoneof(Arg)) && "unexpected instruction designating the promise"); // TODO: Add a check that any remaining users of Inst are after coro.begin // or add code to move the users after coro.begin. Index: lib/Transforms/IPO/ArgumentPromotion.cpp =================================================================== --- lib/Transforms/IPO/ArgumentPromotion.cpp +++ lib/Transforms/IPO/ArgumentPromotion.cpp @@ -762,7 +762,7 @@ while (!WorkList.empty()) { Value *V = WorkList.back(); WorkList.pop_back(); - if (isa(V) || isa(V)) { + if (isoneof(V)) { if (PtrValues.insert(V).second) WorkList.insert(WorkList.end(), V->user_begin(), V->user_end()); } else if (StoreInst *Store = dyn_cast(V)) { Index: lib/Transforms/IPO/GlobalOpt.cpp =================================================================== --- lib/Transforms/IPO/GlobalOpt.cpp +++ lib/Transforms/IPO/GlobalOpt.cpp @@ -122,8 +122,7 @@ return true; if (!V->hasOneUse()) return false; - if (isa(V) || isa(V) || isa(V) || - isa(V)) + if (isoneof(V)) return false; if (isAllocationFn(V, TLI)) return true; @@ -639,8 +638,7 @@ SI->setOperand(1, NewV); Changed = true; } - } else if (isa(I) || isa(I)) { - CallSite CS(I); + } else if (CallSite CS{I}) { if (CS.getCalledValue() == V) { // Calling through the pointer! Turn into a direct call, but be careful // that the pointer is not also being passed as an argument. 
@@ -724,10 +722,8 @@ // If we get here we could have other crazy uses that are transitively // loaded. - assert((isa(GlobalUser) || isa(GlobalUser) || - isa(GlobalUser) || isa(GlobalUser) || - isa(GlobalUser) || - isa(GlobalUser)) && + assert((isoneof(GlobalUser)) && "Only expect load and stores!"); } } @@ -912,7 +908,7 @@ for (const User *U : V->users()) { const Instruction *Inst = cast(U); - if (isa(Inst) || isa(Inst)) { + if (isoneof(Inst)) { continue; // Fine, ignore. } @@ -1607,7 +1603,7 @@ StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0, LI->getOrdering(), LI->getSynchScope(), LI); } else { - assert((isa(StoredVal) || isa(StoredVal)) && + assert((isoneof(StoredVal)) && "This is not a form that we understand!"); StoreVal = StoredVal->getOperand(0); assert(isa(StoreVal) && "Not a load of NewGV!"); Index: lib/Transforms/IPO/IPConstantPropagation.cpp =================================================================== --- lib/Transforms/IPO/IPConstantPropagation.cpp +++ lib/Transforms/IPO/IPConstantPropagation.cpp @@ -174,7 +174,7 @@ continue; // Try to see if all the rets return the same constant or argument. - if (isa(V) || isa(V)) { + if (isoneof(V)) { if (isa(RV)) { // No value found yet? Try the current one. RetVals[i] = V; Index: lib/Transforms/IPO/SampleProfile.cpp =================================================================== --- lib/Transforms/IPO/SampleProfile.cpp +++ lib/Transforms/IPO/SampleProfile.cpp @@ -475,14 +475,13 @@ // Ignore all intrinsics and branch instructions. // Branch instruction usually contains debug info from sources outside of // the residing basic block, thus we ignore them during annotation. - if (isa(Inst) || isa(Inst)) + if (isoneof(Inst)) return std::error_code(); // If a call/invoke instruction is inlined in profile, but not inlined here, // it means that the inlined callsite has no sample, thus the call // instruction should have 0 count. 
- if ((isa(Inst) || isa(Inst)) && - findCalleeFunctionSamples(Inst)) + if ((isoneof(Inst)) && findCalleeFunctionSamples(Inst)) return 0; const DILocation *DIL = DLoc; @@ -676,8 +675,8 @@ SmallVector Candidates; for (auto &I : BB.getInstList()) { const FunctionSamples *FS = nullptr; - if ((isa(I) || isa(I)) && - !isa(I) && (FS = findCalleeFunctionSamples(I))) { + if (isoneof(I) && !isa(I) && + (FS = findCalleeFunctionSamples(I))) { Candidates.push_back(&I); if (callsiteIsHot(Samples, FS)) Hot = true; Index: lib/Transforms/InstCombine/InstCombineCalls.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineCalls.cpp +++ lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -84,7 +84,7 @@ IntegerType *BoolTy = Type::getInt1Ty(V->getContext()); for (unsigned I = 0, E = V->getNumElements(); I != E; ++I) { Constant *Elt = V->getElementAsConstant(I); - assert((isa(Elt) || isa(Elt)) && + assert((isoneof(Elt)) && "Unexpected constant data vector element type"); bool Sign = V->getElementType()->isIntegerTy() ? cast(Elt)->isNegative() @@ -2989,7 +2989,7 @@ bool AllEltsOk = true; for (unsigned i = 0; i != 16; ++i) { Constant *Elt = Mask->getAggregateElement(i); - if (!Elt || !(isa(Elt) || isa(Elt))) { + if (!Elt || !isoneof(Elt)) { AllEltsOk = false; break; } @@ -3560,7 +3560,7 @@ // If the stack restore is in a return, resume, or unwind block and if there // are no allocas or calls between the restore and the return, nuke the // restore. - if (!CannotRemove && (isa(TI) || isa(TI))) + if (!CannotRemove && isoneof(TI)) return eraseInstFromFunction(CI); break; } @@ -3902,7 +3902,7 @@ } } - if (isa(Callee) || isa(Callee)) { + if (isoneof(Callee)) { // If CS does not return void then replaceAllUsesWith undef. // This allows ValueHandlers and custom metadata to adjust itself. 
if (!CS.getInstruction()->getType()->isVoidTy()) Index: lib/Transforms/InstCombine/InstCombineCasts.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineCasts.cpp +++ lib/Transforms/InstCombine/InstCombineCasts.cpp @@ -311,8 +311,7 @@ // If this is an extension from the dest type, we can eliminate it, even if it // has multiple uses. - if ((isa(I) || isa(I)) && - I->getOperand(0)->getType() == Ty) + if (isoneof(I) && I->getOperand(0)->getType() == Ty) return true; // We can't extend or shrink something that has multiple uses: doing so would @@ -2126,7 +2125,7 @@ // If this is a cast from an integer to vector, check to see if the input // is a trunc or zext of a bitcast from vector. If so, we can replace all // the casts with a shuffle and (potentially) a bitcast. - if (isa(Src) || isa(Src)) { + if (isoneof(Src)) { CastInst *SrcCast = cast(Src); if (BitCastInst *BCIn = dyn_cast(SrcCast->getOperand(0))) if (isa(BCIn->getOperand(0)->getType())) Index: lib/Transforms/InstCombine/InstCombineCompares.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineCompares.cpp +++ lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -631,7 +631,7 @@ // the base. Therefore we can't do this transformation. return false; - if (isa(V) || isa(V)) { + if (isoneof(V)) { auto *CI = dyn_cast(V); if (!CI->isNoopCast(DL)) return false; @@ -1087,8 +1087,7 @@ const Value *V = U->getUser(); --MaxIter; - if (isa(V) || isa(V) || isa(V) || - isa(V)) { + if (isoneof(V)) { // Track the uses. } else if (isa(V)) { // Loading from the pointer doesn't escape it. @@ -4407,8 +4406,7 @@ // values. If the ptr->ptr cast can be stripped off both arguments, we do so // now. 
if (BitCastInst *CI = dyn_cast(Op0)) { - if (Op0->getType()->isPointerTy() && - (isa(Op1) || isa(Op1))) { + if (Op0->getType()->isPointerTy() && isoneof(Op1)) { // We keep moving the cast from the left operand over to the right // operand, where it can often be eliminated completely. Op0 = CI->getOperand(0); @@ -4438,7 +4436,7 @@ // if (X) ... // For generality, we handle any zero-extension of any operand comparison // with a constant or another cast from the same type. - if (isa(Op1) || isa(Op1)) + if (isoneof(Op1)) if (Instruction *R = foldICmpWithCastAndCast(I)) return R; } Index: lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp +++ lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp @@ -75,7 +75,7 @@ continue; } - if (isa(I) || isa(I)) { + if (isoneof(I)) { // If uses of the bitcast are ok, we are ok. ValuesToInspect.emplace_back(I, IsOffset); continue; @@ -192,7 +192,7 @@ // allocas if possible...also skip interleaved debug info // BasicBlock::iterator It(New); - while (isa(*It) || isa(*It)) + while (isoneof(*It)) ++It; // Now that I is pointing to the first non-allocation-inst in the block, @@ -262,7 +262,7 @@ for (auto P : Path) replace(P); replace(Inst); - } else if (isa(Inst) || isa(Inst)) { + } else if (isoneof(Inst)) { Path.push_back(Inst); findLoadAndReplace(*Inst); Path.pop_back(); @@ -1279,10 +1279,7 @@ // its only used to compare two uses within the same basic block, which // means that they'll always either have the same value or one of them // will have an undefined value. 
- if (isa(A) || - isa(A) || - isa(A) || - isa(A)) + if (isoneof(A)) if (Instruction *BI = dyn_cast(B)) if (cast(A)->isIdenticalToWhenDefined(BI)) return true; Index: lib/Transforms/InstCombine/InstCombineMulDivRem.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineMulDivRem.cpp +++ lib/Transforms/InstCombine/InstCombineMulDivRem.cpp @@ -1549,7 +1549,7 @@ } // If it's a constant vector, flip any negative values positive. - if (isa(Op1) || isa(Op1)) { + if (isoneof(Op1)) { Constant *C = cast(Op1); unsigned VWidth = C->getType()->getVectorNumElements(); Index: lib/Transforms/InstCombine/InstCombinePHI.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombinePHI.cpp +++ lib/Transforms/InstCombine/InstCombinePHI.cpp @@ -43,7 +43,7 @@ /// adds all have a single use, turn this into a phi and a single binop. Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) { Instruction *FirstInst = cast(PN.getIncomingValue(0)); - assert(isa(FirstInst) || isa(FirstInst)); + assert((isoneof(FirstInst))); unsigned Opc = FirstInst->getOpcode(); Value *LHSVal = FirstInst->getOperand(0); Value *RHSVal = FirstInst->getOperand(1); @@ -510,7 +510,7 @@ if (!shouldChangeType(PN.getType(), CastSrcTy)) return nullptr; } - } else if (isa(FirstInst) || isa(FirstInst)) { + } else if (isoneof(FirstInst)) { // Can fold binop, compare or shift here if the RHS is a constant, // otherwise call FoldPHIArgBinOpIntoPHI. ConstantOp = dyn_cast(FirstInst->getOperand(1)); @@ -917,7 +917,7 @@ // are induction variable analysis (sometimes) and ADCE, which is only run // late. 
if (PHIUser->hasOneUse() && - (isa(PHIUser) || isa(PHIUser)) && + isoneof(PHIUser) && PHIUser->user_back() == &PN) { return replaceInstUsesWith(PN, UndefValue::get(PN.getType())); } Index: lib/Transforms/InstCombine/InstructionCombining.cpp =================================================================== --- lib/Transforms/InstCombine/InstructionCombining.cpp +++ lib/Transforms/InstCombine/InstructionCombining.cpp @@ -1361,8 +1361,7 @@ if (isa(RHS)) Shuffle = cast(RHS); if (isa(LHS)) C1 = cast(LHS); if (isa(RHS)) C1 = cast(RHS); - if (Shuffle && C1 && - (isa(C1) || isa(C1)) && + if (Shuffle && C1 && isoneof(C1) && isa(Shuffle->getOperand(1)) && Shuffle->getType() == Shuffle->getOperand(0)->getType()) { SmallVector ShMask = Shuffle->getShuffleMask(); @@ -2064,7 +2063,7 @@ replaceInstUsesWith(*C, ConstantInt::get(Type::getInt1Ty(C->getContext()), C->isFalseWhenEqual())); - } else if (isa(I) || isa(I)) { + } else if (isoneof(I)) { replaceInstUsesWith(*I, UndefValue::get(I->getType())); } eraseInstFromFunction(*I); Index: lib/Transforms/Instrumentation/BoundsChecking.cpp =================================================================== --- lib/Transforms/Instrumentation/BoundsChecking.cpp +++ lib/Transforms/Instrumentation/BoundsChecking.cpp @@ -179,9 +179,9 @@ std::vector WorkList; for (inst_iterator i = inst_begin(F), e = inst_end(F); i != e; ++i) { Instruction *I = &*i; - if (isa(I) || isa(I) || isa(I) || + if (isoneof(I) || isa(I)) - WorkList.push_back(I); + WorkList.push_back(I); } bool MadeChange = false; Index: lib/Transforms/Instrumentation/DataFlowSanitizer.cpp =================================================================== --- lib/Transforms/Instrumentation/DataFlowSanitizer.cpp +++ lib/Transforms/Instrumentation/DataFlowSanitizer.cpp @@ -843,7 +843,7 @@ Pos = I->getNextNode(); else Pos = &DFSF.F->getEntryBlock().front(); - while (isa(Pos) || isa(Pos)) + while (isoneof(Pos)) Pos = Pos->getNextNode(); IRBuilder<> IRB(Pos); Value *Ne = 
IRB.CreateICmpNE(V, DFSF.DFS.ZeroShadow); @@ -1058,7 +1058,7 @@ GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout()); bool AllConstants = true; for (Value *Obj : Objs) { - if (isa(Obj) || isa(Obj)) + if (isoneof(Obj)) continue; if (isa(Obj) && cast(Obj)->isConstant()) continue; Index: lib/Transforms/Instrumentation/EfficiencySanitizer.cpp =================================================================== --- lib/Transforms/Instrumentation/EfficiencySanitizer.cpp +++ lib/Transforms/Instrumentation/EfficiencySanitizer.cpp @@ -624,8 +624,8 @@ for (auto &BB : F) { for (auto &Inst : BB) { - if ((isa(Inst) || isa(Inst) || - isa(Inst) || isa(Inst)) && + if (isoneof( + Inst) && !shouldIgnoreMemoryAccess(&Inst)) LoadsAndStores.push_back(&Inst); else if (isa(Inst)) Index: lib/Transforms/Instrumentation/GCOVProfiling.cpp =================================================================== --- lib/Transforms/Instrumentation/GCOVProfiling.cpp +++ lib/Transforms/Instrumentation/GCOVProfiling.cpp @@ -531,7 +531,7 @@ // single successor, so split the entry block to make sure of that. BasicBlock &EntryBlock = F.getEntryBlock(); BasicBlock::iterator It = EntryBlock.begin(); - while (isa(*It) || isa(*It)) + while (isoneof(*It)) ++It; EntryBlock.splitBasicBlock(It); Index: lib/Transforms/Instrumentation/MemorySanitizer.cpp =================================================================== --- lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -1055,7 +1055,7 @@ /// \brief Create a dirty shadow of a given shadow type. 
Constant *getPoisonedShadow(Type *ShadowTy) { assert(ShadowTy); - if (isa(ShadowTy) || isa(ShadowTy)) + if (isoneof(ShadowTy)) return Constant::getAllOnesValue(ShadowTy); if (ArrayType *AT = dyn_cast(ShadowTy)) { SmallVector Vals(AT->getNumElements(), @@ -1188,7 +1188,7 @@ if (!MS.TrackOrigins) return nullptr; if (!PropagateShadow) return getCleanOrigin(); if (isa(V)) return getCleanOrigin(); - assert((isa(V) || isa(V)) && + assert((isoneof(V)) && "Unexpected value type in getOrigin()"); Value *Origin = OriginMap[V]; assert(Origin && "Missing origin"); @@ -1209,7 +1209,7 @@ if (!InsertChecks) return; #ifndef NDEBUG Type *ShadowTy = Shadow->getType(); - assert((isa(ShadowTy) || isa(ShadowTy)) && + assert((isoneof(ShadowTy)) && "Can only insert checks for integer and vector shadow types"); #endif InstrumentationList.push_back( @@ -1315,7 +1315,7 @@ } void handleCASOrRMW(Instruction &I) { - assert(isa(I) || isa(I)); + assert((isoneof(I))); IRBuilder<> IRB(&I); Value *Addr = I.getOperand(0); Index: lib/Transforms/Instrumentation/ThreadSanitizer.cpp =================================================================== --- lib/Transforms/Instrumentation/ThreadSanitizer.cpp +++ lib/Transforms/Instrumentation/ThreadSanitizer.cpp @@ -423,9 +423,9 @@ for (auto &Inst : BB) { if (isAtomic(&Inst)) AtomicAccesses.push_back(&Inst); - else if (isa(Inst) || isa(Inst)) + else if (isoneof(Inst)) LocalLoadsAndStores.push_back(&Inst); - else if (isa(Inst) || isa(Inst)) { + else if (isoneof(Inst)) { if (CallInst *CI = dyn_cast(&Inst)) maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI); if (isa(Inst)) Index: lib/Transforms/ObjCARC/ObjCARCOpts.cpp =================================================================== --- lib/Transforms/ObjCARC/ObjCARCOpts.cpp +++ lib/Transforms/ObjCARC/ObjCARCOpts.cpp @@ -1697,7 +1697,7 @@ // If the object being released is in static or stack storage, we know it's // not being managed by ObjC reference counting, so we can delete pairs // regardless of what 
possible decrements or uses lie between them. - bool KnownSafe = isa(Arg) || isa(Arg); + bool KnownSafe = isoneof(Arg); // A constant pointer can't be pointing to an object on the heap. It may // be reference-counted, but it won't be deleted. Index: lib/Transforms/Scalar/ADCE.cpp =================================================================== --- lib/Transforms/Scalar/ADCE.cpp +++ lib/Transforms/Scalar/ADCE.cpp @@ -300,7 +300,7 @@ } if (!isa(I)) return false; - if (RemoveControlFlowFlag && (isa(I) || isa(I))) + if (RemoveControlFlowFlag && isoneof(I)) return false; return true; } Index: lib/Transforms/Scalar/DeadStoreElimination.cpp =================================================================== --- lib/Transforms/Scalar/DeadStoreElimination.cpp +++ lib/Transforms/Scalar/DeadStoreElimination.cpp @@ -651,7 +651,7 @@ // If the kill pointer can be easily reduced to an alloca, don't bother doing // extraneous AA queries. - if (isa(UnderlyingPointer) || isa(UnderlyingPointer)) { + if (isoneof(UnderlyingPointer)) { DeadStackObjects.remove(const_cast(UnderlyingPointer)); return; } Index: lib/Transforms/Scalar/EarlyCSE.cpp =================================================================== --- lib/Transforms/Scalar/EarlyCSE.cpp +++ lib/Transforms/Scalar/EarlyCSE.cpp @@ -69,11 +69,9 @@ // This can only handle non-void readnone functions. if (CallInst *CI = dyn_cast(Inst)) return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy(); - return isa(Inst) || isa(Inst) || - isa(Inst) || isa(Inst) || - isa(Inst) || isa(Inst) || - isa(Inst) || isa(Inst) || - isa(Inst) || isa(Inst); + return isoneof(Inst); } }; } @@ -126,10 +124,9 @@ IVI->getOperand(1), hash_combine_range(IVI->idx_begin(), IVI->idx_end())); - assert((isa(Inst) || isa(Inst) || - isa(Inst) || isa(Inst) || - isa(Inst) || isa(Inst) || - isa(Inst)) && + assert((isoneof( + Inst)) && "Invalid/unknown instruction"); // Mix in the opcode. 
Index: lib/Transforms/Scalar/GVN.cpp =================================================================== --- lib/Transforms/Scalar/GVN.cpp +++ lib/Transforms/Scalar/GVN.cpp @@ -796,7 +796,7 @@ << setExtraArgs(); for (auto *U : LI->getPointerOperand()->users()) - if (U != LI && (isa(U) || isa(U)) && + if (U != LI && isoneof(U) && DT->dominates(cast(U), LI)) { // FIXME: for now give up if there are multiple memory accesses that // dominate the load. We need further analysis to decide which one is @@ -1543,7 +1543,7 @@ // Prefer a constant on the right-hand side, or an Argument if no constants. if (isa(LHS) || (isa(LHS) && !isa(RHS))) std::swap(LHS, RHS); - assert((isa(LHS) || isa(LHS)) && "Unexpected value!"); + assert((isoneof(LHS)) && "Unexpected value!"); // If there is no obvious reason to prefer the left-hand side over the // right-hand side, ensure the longest lived term is on the right-hand side, @@ -1781,7 +1781,7 @@ // Allocations are always uniquely numbered, so we can save time and memory // by fast failing them. - if (isa(I) || isa(I) || isa(I)) { + if (isoneof(I)) { addToLeaderTable(Num, I, I->getParent()); return false; } @@ -1935,7 +1935,7 @@ bool success = true; for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) { Value *Op = Instr->getOperand(i); - if (isa(Op) || isa(Op) || isa(Op)) + if (isoneof(Op)) continue; // This could be a newly inserted instruction, in which case, we won't // find a value number, and should give up before we hurt ourselves. @@ -1970,10 +1970,9 @@ } bool GVN::performScalarPRE(Instruction *CurInst) { - if (isa(CurInst) || isa(CurInst) || - isa(CurInst) || CurInst->getType()->isVoidTy() || - CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() || - isa(CurInst)) + if (isoneof(CurInst) || + CurInst->getType()->isVoidTy() || CurInst->mayReadFromMemory() || + CurInst->mayHaveSideEffects() || isa(CurInst)) return false; // Don't do PRE on compares. 
The PHI would prevent CodeGenPrepare from Index: lib/Transforms/Scalar/IndVarSimplify.cpp =================================================================== --- lib/Transforms/Scalar/IndVarSimplify.cpp +++ lib/Transforms/Scalar/IndVarSimplify.cpp @@ -1917,7 +1917,7 @@ return false; // Load and return values may be undef. - if(I->mayReadFromMemory() || isa(I) || isa(I)) + if (I->mayReadFromMemory() || isoneof(I)) return false; // Optimistically handle other instructions. Index: lib/Transforms/Scalar/LoadCombine.cpp =================================================================== --- lib/Transforms/Scalar/LoadCombine.cpp +++ lib/Transforms/Scalar/LoadCombine.cpp @@ -100,7 +100,7 @@ unsigned BitWidth = DL.getPointerSizeInBits(LI.getPointerAddressSpace()); POP.Offset = APInt(BitWidth, 0); - while (isa(POP.Pointer) || isa(POP.Pointer)) { + while (isoneof(POP.Pointer)) { if (auto *GEP = dyn_cast(POP.Pointer)) { APInt LastOffset = POP.Offset; if (!GEP->accumulateConstantOffset(DL, POP.Offset)) { Index: lib/Transforms/Scalar/LoopInterchange.cpp =================================================================== --- lib/Transforms/Scalar/LoopInterchange.cpp +++ lib/Transforms/Scalar/LoopInterchange.cpp @@ -825,7 +825,7 @@ bool FoundInduction = false; for (const Instruction &I : reverse(*InnerLoopLatch)) { - if (isa(I) || isa(I) || isa(I)) + if (isoneof(I)) continue; // We found an instruction. If this is not induction variable then it is not // safe to split this loop latch. Index: lib/Transforms/Scalar/LoopStrengthReduce.cpp =================================================================== --- lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -1189,11 +1189,9 @@ // Rough heuristic; favor registers which don't require extra setup // instructions in the preheader. 
- if (!isa(Reg) && - !isa(Reg) && - !(isa(Reg) && - (isa(cast(Reg)->getStart()) || - isa(cast(Reg)->getStart())))) + if (!isa(Reg) && !isa(Reg) && + !(isa(Reg) && isoneof( + cast(Reg)->getStart()))) ++SetupCost; NumIVMuls += isa(Reg) && @@ -1287,7 +1285,7 @@ // Check with target if this offset with this instruction is // specifically not supported. - if ((isa(Fixup.UserInst) || isa(Fixup.UserInst)) && + if (isoneof(Fixup.UserInst) && !TTI.isFoldableMemAccessOffset(Fixup.UserInst, Offset)) NumBaseAdds++; } @@ -5108,8 +5106,7 @@ // no good place to stick any instructions. if (auto *PN = dyn_cast(U.getUser())) { auto *FirstNonPHI = PN->getParent()->getFirstNonPHI(); - if (isa(FirstNonPHI) || - isa(FirstNonPHI)) + if (isoneof(FirstNonPHI)) for (BasicBlock *PredBB : PN->blocks()) if (isa(PredBB->getFirstNonPHI())) return; Index: lib/Transforms/Scalar/MemCpyOptimizer.cpp =================================================================== --- lib/Transforms/Scalar/MemCpyOptimizer.cpp +++ lib/Transforms/Scalar/MemCpyOptimizer.cpp @@ -566,7 +566,7 @@ return false; CallSites.push_back(CS); - } else if (isa(C) || isa(C) || isa(C)) { + } else if (isoneof(C)) { // If we can't lift this before P, it's game over. auto ML = MemoryLocation::get(C); if (AA.getModRefInfo(P, ML) != MRI_NoModRef) @@ -885,7 +885,7 @@ while (!srcUseList.empty()) { User *U = srcUseList.pop_back_val(); - if (isa(U) || isa(U)) { + if (isoneof(U)) { for (User *UU : U->users()) srcUseList.push_back(UU); continue; Index: lib/Transforms/Scalar/NewGVN.cpp =================================================================== --- lib/Transforms/Scalar/NewGVN.cpp +++ lib/Transforms/Scalar/NewGVN.cpp @@ -760,7 +760,7 @@ auto *DTN = DT->getNode(BB); if (RPOOrdering.lookup(DTN) >= PHIRPO) HasBackedge = true; - AllConstant &= isa(*U) || isa(*U); + AllConstant &= isoneof(*U); // Don't try to transform self-defined phis. 
if (*U == PN) @@ -836,7 +836,7 @@ "We should always have had a basic expression here"); deleteExpression(E); return createConstantExpression(C); - } else if (isa(V) || isa(V)) { + } else if (isoneof(V)) { if (I) DEBUG(dbgs() << "Simplified " << *I << " to " << " variable " << *V << "\n"); @@ -1695,7 +1695,7 @@ const Expression *E = nullptr; if (auto *C = dyn_cast(V)) E = createConstantExpression(C); - else if (isa(V) || isa(V)) { + else if (isoneof(V)) { E = createVariableExpression(V); } else { // TODO: memory intrinsics. @@ -2796,9 +2796,7 @@ // be placed anywhere) in the function. We don't do globals here // because they are often worse to put in place. // TODO: Separate cost from availability -static bool alwaysAvailable(Value *V) { - return isa(V) || isa(V); -} +static bool alwaysAvailable(Value *V) { return isoneof(V); } struct NewGVN::ValueDFS { int DFSIn = 0; Index: lib/Transforms/Scalar/RewriteStatepointsForGC.cpp =================================================================== --- lib/Transforms/Scalar/RewriteStatepointsForGC.cpp +++ lib/Transforms/Scalar/RewriteStatepointsForGC.cpp @@ -372,7 +372,7 @@ // A PHI or Select is a base defining value. The outer findBasePointer // algorithm is responsible for constructing a base value for this BDV. - assert((isa(I) || isa(I)) && + assert((isoneof(I)) && "unknown vector instruction - no base found for vector element"); return BaseDefiningValueResult(I, false); } @@ -455,7 +455,7 @@ // We assume that functions in the source language only return base // pointers. This should probably be generalized via attributes to support // both source language and internal functions. - if (isa(I) || isa(I)) + if (isoneof(I)) return BaseDefiningValueResult(I, true); // TODO: I have absolutely no idea how to implement this part yet. It's not @@ -496,7 +496,7 @@ // return a value which dynamically selects from among several base // derived pointers (each with it's own base potentially). 
It's the job of // the caller to resolve these. - assert((isa(I) || isa(I)) && + assert((isoneof(I)) && "missing instruction case in findBaseDefiningValing"); return BaseDefiningValueResult(I, false); } @@ -680,9 +680,8 @@ #ifndef NDEBUG auto isExpectedBDVType = [](Value *BDV) { - return isa(BDV) || isa(BDV) || - isa(BDV) || isa(BDV) || - isa(BDV); + return isoneof(BDV); }; #endif @@ -1994,7 +1993,7 @@ // introduce any new uses of pointers not in the liveset. // Note that it's fine to introduce new uses of pointers which were // otherwise not used after this statepoint. - assert(isa(Instr) || isa(Instr)); + assert((isoneof(Instr))); Instruction *ClonedValue = Instr->clone(); ClonedValue->insertBefore(InsertBefore); Index: lib/Transforms/Scalar/SCCP.cpp =================================================================== --- lib/Transforms/Scalar/SCCP.cpp +++ lib/Transforms/Scalar/SCCP.cpp @@ -1291,7 +1291,7 @@ // extractvalue and insertvalue don't need to be marked; they are // tracked as precisely as their operands. - if (isa(I) || isa(I)) + if (isoneof(I)) continue; // Send the results of everything else to overdefined. We could be @@ -1718,9 +1718,8 @@ if (const auto *SI = dyn_cast(UR)) { if (SI->getOperand(0) == GV || SI->isVolatile()) return true; // Storing addr of GV. - } else if (isa(UR) || isa(UR)) { + } else if (ImmutableCallSite CS{UR}) { // Make sure we are calling the function, not passing the address. 
- ImmutableCallSite CS(cast(UR)); if (!CS.isCallee(&U)) return true; } else if (const auto *LI = dyn_cast(UR)) { Index: lib/Transforms/Scalar/SROA.cpp =================================================================== --- lib/Transforms/Scalar/SROA.cpp +++ lib/Transforms/Scalar/SROA.cpp @@ -921,7 +921,7 @@ } void visitPHINodeOrSelectInst(Instruction &I) { - assert(isa(I) || isa(I)); + assert((isoneof(I))); if (I.use_empty()) return markAsDead(I); @@ -3980,8 +3980,7 @@ if (S.beginOffset() == 0 && S.endOffset() >= DL.getTypeAllocSize(AI.getAllocatedType())) continue; - if (isa(S.getUse()->getUser()) || - isa(S.getUse()->getUser())) { + if (isoneof(S.getUse()->getUser())) { S.makeUnsplittable(); IsSorted = false; } Index: lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp =================================================================== --- lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp +++ lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp @@ -610,7 +610,7 @@ } if (CastInst *Cast = dyn_cast(U)) { - assert((isa(Cast) || isa(Cast)) && + assert((isoneof(Cast)) && "We only traced into two types of CastInst: sext and zext"); ExtInsts.push_back(Cast); UserChain[ChainIndex] = nullptr; Index: lib/Transforms/Scalar/Sink.cpp =================================================================== --- lib/Transforms/Scalar/Sink.cpp +++ lib/Transforms/Scalar/Sink.cpp @@ -72,7 +72,7 @@ return false; } - if (isa(Inst) || isa(Inst) || Inst->isEHPad() || + if (isoneof(Inst) || Inst->isEHPad() || Inst->mayThrow()) return false; Index: lib/Transforms/Utils/CodeExtractor.cpp =================================================================== --- lib/Transforms/Utils/CodeExtractor.cpp +++ lib/Transforms/Utils/CodeExtractor.cpp @@ -61,7 +61,7 @@ // Don't hoist code containing allocas, invokes, or vastarts. 
for (BasicBlock::const_iterator I = BB.begin(), E = BB.end(); I != E; ++I) { - if (isa(I) || isa(I)) + if (isoneof(I)) return false; if (const CallInst *CI = dyn_cast(I)) if (const Function *F = CI->getCalledFunction()) Index: lib/Transforms/Utils/Evaluator.cpp =================================================================== --- lib/Transforms/Utils/Evaluator.cpp +++ lib/Transforms/Utils/Evaluator.cpp @@ -349,8 +349,7 @@ UndefValue::get(Ty), AI->getName())); InstResult = AllocaTmps.back().get(); DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n"); - } else if (isa(CurInst) || isa(CurInst)) { - CallSite CS(&*CurInst); + } else if (CallSite CS{&*CurInst}) { // Debug info can safely be ignored here. if (isa(CS.getInstruction())) { Index: lib/Transforms/Utils/InlineFunction.cpp =================================================================== --- lib/Transforms/Utils/InlineFunction.cpp +++ lib/Transforms/Utils/InlineFunction.cpp @@ -265,7 +265,7 @@ Value *ChildUnwindDestToken; if (auto *Invoke = dyn_cast(U)) { ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI(); - } else if (isa(U) || isa(U)) { + } else if (isoneof(U)) { Instruction *ChildPad = cast(U); auto Memo = MemoMap.find(ChildPad); if (Memo == MemoMap.end()) { @@ -461,7 +461,7 @@ cast(U)->getUnwindDest()->getFirstNonPHI()) == CatchPad)) && "Expected useless pad"); - if (isa(U) || isa(U)) + if (isoneof(U)) Worklist.push_back(cast(U)); } } @@ -474,7 +474,7 @@ cast(U)->getUnwindDest()->getFirstNonPHI()) == UselessPad)) && "Expected useless pad"); - if (isa(U) || isa(U)) + if (isoneof(U)) Worklist.push_back(cast(U)); } } @@ -1007,9 +1007,9 @@ // Is this value a constant that cannot be derived from any pointer // value (we need to exclude constant expressions, for example, that // are formed from arithmetic on global symbols). 
- bool IsNonPtrConst = isa(V) || isa(V) || - isa(V) || - isa(V) || isa(V); + bool IsNonPtrConst = + isoneof(V); if (IsNonPtrConst) continue; Index: lib/Transforms/Utils/Local.cpp =================================================================== --- lib/Transforms/Utils/Local.cpp +++ lib/Transforms/Utils/Local.cpp @@ -1532,7 +1532,7 @@ if (auto *CI = dyn_cast(&I)) { Value *Callee = CI->getCalledValue(); - if (isa(Callee) || isa(Callee)) { + if (isoneof(Callee)) { changeToUnreachable(CI, /*UseLLVMTrap=*/false); Changed = true; break; @@ -1573,7 +1573,7 @@ if (auto *II = dyn_cast(Terminator)) { // Turn invokes that call 'nounwind' functions into ordinary calls. Value *Callee = II->getCalledValue(); - if (isa(Callee) || isa(Callee)) { + if (isoneof(Callee)) { changeToUnreachable(II, true); Changed = true; } else if (II->doesNotThrow() && canSimplifyInvokeNoUnwind(&F)) { @@ -1766,7 +1766,7 @@ // FIXME: we should try to preserve both invariant.group md if they are // different, but right now instruction can only have one invariant.group. if (auto *JMD = J->getMetadata(LLVMContext::MD_invariant_group)) - if (isa(K) || isa(K)) + if (isoneof(K)) K->setMetadata(LLVMContext::MD_invariant_group, JMD); } Index: lib/Transforms/Utils/LoopUtils.cpp =================================================================== --- lib/Transforms/Utils/LoopUtils.cpp +++ lib/Transforms/Utils/LoopUtils.cpp @@ -279,10 +279,9 @@ if (IsAPhi && Cur != Phi && !areAllUsesIn(Cur, VisitedInsts)) return false; - if (Kind == RK_IntegerMinMax && - (isa(Cur) || isa(Cur))) + if (Kind == RK_IntegerMinMax && isoneof(Cur)) ++NumCmpSelectPatternInst; - if (Kind == RK_FloatMinMax && (isa(Cur) || isa(Cur))) + if (Kind == RK_FloatMinMax && isoneof(Cur)) ++NumCmpSelectPatternInst; // Check whether we found a reduction operator. 
@@ -381,7 +380,7 @@ RecurrenceDescriptor::InstDesc RecurrenceDescriptor::isMinMaxSelectCmpPattern(Instruction *I, InstDesc &Prev) { - assert((isa(I) || isa(I) || isa(I)) && + assert((isoneof(I)) && "Expect a select instruction"); Instruction *Cmp = nullptr; SelectInst *Select = nullptr; Index: lib/Transforms/Utils/PredicateInfo.cpp =================================================================== --- lib/Transforms/Utils/PredicateInfo.cpp +++ lib/Transforms/Utils/PredicateInfo.cpp @@ -296,9 +296,9 @@ // are only being used in the comparison, which means they will not be useful // for us to consider for predicateinfo. // - if ((isa(Op0) || isa(Op0)) && !Op0->hasOneUse()) + if (isoneof(Op0) && !Op0->hasOneUse()) CmpOperands.push_back(Op0); - if ((isa(Op1) || isa(Op1)) && !Op1->hasOneUse()) + if (isoneof(Op1) && !Op1->hasOneUse()) CmpOperands.push_back(Op1); } @@ -426,7 +426,7 @@ void PredicateInfo::processSwitch(SwitchInst *SI, BasicBlock *BranchBB, SmallPtrSetImpl &OpsToRename) { Value *Op = SI->getCondition(); - if ((!isa(Op) && !isa(Op)) || Op->hasOneUse()) + if (!isoneof(Op) || Op->hasOneUse()) return; // Remember how many outgoing edges there are to every successor. 
Index: lib/Transforms/Utils/PromoteMemoryToRegister.cpp =================================================================== --- lib/Transforms/Utils/PromoteMemoryToRegister.cpp +++ lib/Transforms/Utils/PromoteMemoryToRegister.cpp @@ -319,7 +319,7 @@ for (auto UI = AI->user_begin(), UE = AI->user_end(); UI != UE;) { Instruction *I = cast(*UI); ++UI; - if (isa(I) || isa(I)) + if (isoneof(I)) continue; if (!I->getType()->isVoidTy()) { Index: lib/Transforms/Utils/SimplifyCFG.cpp =================================================================== --- lib/Transforms/Utils/SimplifyCFG.cpp +++ lib/Transforms/Utils/SimplifyCFG.cpp @@ -1732,7 +1732,7 @@ auto *T = B->getTerminator(); if (isa(T) && cast(T)->isUnconditional()) UnconditionalPreds.push_back(B); - else if ((isa(T) || isa(T)) && !Cond) + else if (isoneof(T) && !Cond) Cond = T; else return false; @@ -2578,7 +2578,7 @@ return false; } - if (!Cond || (!isa(Cond) && !isa(Cond)) || + if (!Cond || !isoneof(Cond) || Cond->getParent() != BB || !Cond->hasOneUse()) return false; @@ -2925,11 +2925,10 @@ unsigned N = 0; for (auto &I : *BB) { // Cheap instructions viable for folding. - if (isa(I) || isa(I) || - isa(I)) + if (isoneof(I)) ++N; // Free instructions. 
- else if (isa(I) || isa(I) || + else if (isoneof(I) || IsaBitcastOfPointerType(I)) continue; else Index: lib/Transforms/Utils/SplitModule.cpp =================================================================== --- lib/Transforms/Utils/SplitModule.cpp +++ lib/Transforms/Utils/SplitModule.cpp @@ -47,8 +47,7 @@ if (const Instruction *I = dyn_cast(U)) { const GlobalValue *F = I->getParent()->getParent(); GVtoClusterMap.unionSets(GV, F); - } else if (isa(U) || isa(U) || - isa(U)) { + } else if (isoneof(U)) { GVtoClusterMap.unionSets(GV, cast(U)); } else { llvm_unreachable("Underimplemented use case"); Index: lib/Transforms/Vectorize/BBVectorize.cpp =================================================================== --- lib/Transforms/Vectorize/BBVectorize.cpp +++ lib/Transforms/Vectorize/BBVectorize.cpp @@ -533,12 +533,12 @@ // subsequent iterations when looking for vectorization opportunities // while still tracking dependency chains that flow through those // instructions. - if (isa(V) || isa(V)) + if (isoneof(V)) return 0; // Give a load or store half of the required depth so that load/store // pairs will vectorize. - if (!Config.NoMemOpBoost && (isa(V) || isa(V))) + if (!Config.NoMemOpBoost && isoneof(V)) return Config.ReqChainDepth/2; return 1; @@ -907,8 +907,9 @@ // Currently, vector GEPs exist only with one index. if (G->getNumIndices() != 1) return false; - } else if (!(I->isBinaryOp() || isa(I) || - isa(I) || isa(I))) { + } else if (!(I->isBinaryOp() || + isoneof(I))) { return false; } @@ -1075,8 +1076,8 @@ // of constants. 
Value *IOp = I->getOperand(1); Value *JOp = J->getOperand(1); - if ((isa(IOp) || isa(IOp)) && - (isa(JOp) || isa(JOp))) { + if (isoneof(IOp) && + isoneof(JOp)) { Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; Constant *SplatValue = cast(IOp)->getSplatValue(); if (SplatValue != nullptr && Index: lib/Transforms/Vectorize/LoadStoreVectorizer.cpp =================================================================== --- lib/Transforms/Vectorize/LoadStoreVectorizer.cpp +++ lib/Transforms/Vectorize/LoadStoreVectorizer.cpp @@ -475,7 +475,7 @@ }); for (Instruction &I : make_range(getBoundaryInstrs(Chain))) { - if (isa(I) || isa(I)) { + if (isoneof(I)) { if (!is_contained(Chain, &I)) MemoryInstrs.push_back(&I); else Index: lib/Transforms/Vectorize/LoopVectorize.cpp =================================================================== --- lib/Transforms/Vectorize/LoopVectorize.cpp +++ lib/Transforms/Vectorize/LoopVectorize.cpp @@ -293,7 +293,7 @@ /// A helper function that returns the type of loaded or stored value. static Type *getMemInstValueType(Value *I) { - assert((isa(I) || isa(I)) && + assert((isoneof(I)) && "Expected Load or Store instruction"); if (auto *LI = dyn_cast(I)) return LI->getType(); @@ -302,7 +302,7 @@ /// A helper function that returns the alignment of load or store instruction. static unsigned getMemInstAlignment(Value *I) { - assert((isa(I) || isa(I)) && + assert((isoneof(I)) && "Expected Load or Store instruction"); if (auto *LI = dyn_cast(I)) return LI->getAlignment(); @@ -312,7 +312,7 @@ /// A helper function that returns the address space of the pointer operand of /// load or store instruction. static unsigned getMemInstAddressSpace(Value *I) { - assert((isa(I) || isa(I)) && + assert((isoneof(I)) && "Expected Load or Store instruction"); if (auto *LI = dyn_cast(I)) return LI->getPointerAddressSpace(); @@ -866,7 +866,7 @@ const Instruction *Orig) { // If the loop was versioned with memchecks, add the corresponding no-alias // metadata. 
- if (LVer && (isa(Orig) || isa(Orig))) + if (LVer && (isoneof(Orig))) LVer->annotateInstWithNoAlias(To, Orig); } @@ -3624,8 +3624,8 @@ namespace { struct CSEDenseMapInfo { static bool canHandle(const Instruction *I) { - return isa(I) || isa(I) || - isa(I) || isa(I); + return isoneof(I); } static inline Instruction *getEmptyKey() { return DenseMapInfo::getEmptyKey(); @@ -4680,8 +4680,7 @@ void InnerLoopVectorizer::vectorizeInstruction(Instruction &I) { // Scalarize instructions that should remain scalar after vectorization. - if (VF > 1 && - !(isa(&I) || isa(&I) || isa(&I)) && + if (VF > 1 && !isoneof(&I) && shouldScalarizeInstruction(&I)) { scalarizeInstruction(&I, Legal->isScalarWithPredication(&I)); return; @@ -5498,7 +5497,7 @@ // pointer are memory accesses, place the pointer in ScalarPtrs. Otherwise, // place the pointer in PossibleNonScalarPtrs. if (isScalarUse(MemAccess, Ptr) && all_of(I->users(), [&](User *U) { - return isa(U) || isa(U); + return isoneof(U); })) ScalarPtrs.insert(I); else @@ -5565,8 +5564,7 @@ if (all_of(Src->users(), [&](User *U) -> bool { auto *J = cast(U); return !TheLoop->contains(J) || Worklist.count(J) || - ((isa(J) || isa(J)) && - isScalarUse(J, Src)); + (isoneof(J) && isScalarUse(J, Src)); })) { Worklist.insert(Src); DEBUG(dbgs() << "LV: Found scalar instruction: " << *Src << "\n"); @@ -7335,7 +7333,7 @@ if (CInt && CInt->getValue().isPowerOf2()) Op2VP = TargetTransformInfo::OP_PowerOf2; Op2VK = TargetTransformInfo::OK_UniformConstantValue; - } else if (isa(Op2) || isa(Op2)) { + } else if (isoneof(Op2)) { Op2VK = TargetTransformInfo::OK_NonUniformConstantValue; Constant *SplatValue = cast(Op2)->getSplatValue(); if (SplatValue) { Index: lib/Transforms/Vectorize/SLPVectorizer.cpp =================================================================== --- lib/Transforms/Vectorize/SLPVectorizer.cpp +++ lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -3523,8 +3523,8 @@ // Otherwise, we need to visit the operands of the instruction. 
We only // handle the interesting cases from buildTree here. If an operand is an // instruction we haven't yet visited, we add it to the worklist. - else if (isa(I) || isa(I) || isa(I) || - isa(I) || isa(I) || isa(I)) { + else if (isoneof(I)) { for (Use &U : I->operands()) if (auto *J = dyn_cast(U.get())) if (!Visited.count(J)) @@ -4188,7 +4188,7 @@ IRBuilder Builder(InsertAfter->getParent(), ++BasicBlock::iterator(InsertAfter)); Instruction *I = cast(V); - assert(isa(I) || isa(I)); + assert((isoneof(I))); Instruction *Extract = cast(Builder.CreateExtractElement( VectorizedRoot, Builder.getInt32(VecIdx++)));