Index: include/llvm/Analysis/AliasAnalysis.h
===================================================================
--- include/llvm/Analysis/AliasAnalysis.h
+++ include/llvm/Analysis/AliasAnalysis.h
@@ -98,7 +98,7 @@
 /// they form a two bit matrix and bit-tests for 'mod' or 'ref'
 /// work with any of the possible values.
-enum ModRefInfo {
+enum class ModRefInfo {
   /// The access neither references nor modifies the value stored in memory.
   MRI_NoModRef = 0,
   /// The access may reference the value stored in memory.
@@ -110,43 +110,49 @@
 };
 
 LLVM_NODISCARD inline bool isNoModRef(const ModRefInfo MRI) {
-  return MRI == MRI_NoModRef;
+  return MRI == ModRefInfo::MRI_NoModRef;
 }
 LLVM_NODISCARD inline bool isModOrRefSet(const ModRefInfo MRI) {
-  return MRI & MRI_ModRef;
+  return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MRI_ModRef);
 }
 LLVM_NODISCARD inline bool isModAndRefSet(const ModRefInfo MRI) {
-  return (MRI & MRI_ModRef) == MRI_ModRef;
+  return (static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MRI_ModRef)) ==
+         static_cast<int>(ModRefInfo::MRI_ModRef);
 }
 LLVM_NODISCARD inline bool isModSet(const ModRefInfo MRI) {
-  return MRI & MRI_Mod;
+  return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MRI_Mod);
 }
 LLVM_NODISCARD inline bool isRefSet(const ModRefInfo MRI) {
-  return MRI & MRI_Ref;
+  return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MRI_Ref);
 }
-LLVM_NODISCARD inline ModRefInfo setRef(const ModRefInfo MRI) {
-  return ModRefInfo(MRI | MRI_Ref);
-}
 LLVM_NODISCARD inline ModRefInfo setMod(const ModRefInfo MRI) {
-  return ModRefInfo(MRI | MRI_Mod);
+  return ModRefInfo(static_cast<int>(MRI) |
+                    static_cast<int>(ModRefInfo::MRI_Mod));
+}
+LLVM_NODISCARD inline ModRefInfo setRef(const ModRefInfo MRI) {
+  return ModRefInfo(static_cast<int>(MRI) |
+                    static_cast<int>(ModRefInfo::MRI_Ref));
 }
 LLVM_NODISCARD inline ModRefInfo setModAndRef(const ModRefInfo MRI) {
-  return ModRefInfo(MRI | MRI_ModRef);
+  return ModRefInfo(static_cast<int>(MRI) |
+                    static_cast<int>(ModRefInfo::MRI_ModRef));
 }
 LLVM_NODISCARD inline ModRefInfo clearMod(const ModRefInfo MRI) {
-  return ModRefInfo(MRI & MRI_Ref);
+  return ModRefInfo(static_cast<int>(MRI) &
+                    static_cast<int>(ModRefInfo::MRI_Ref));
 }
 LLVM_NODISCARD inline ModRefInfo clearRef(const ModRefInfo MRI) {
-  return ModRefInfo(MRI & MRI_Mod);
+  return ModRefInfo(static_cast<int>(MRI) &
+                    static_cast<int>(ModRefInfo::MRI_Mod));
 }
 LLVM_NODISCARD inline ModRefInfo unionModRef(const ModRefInfo MRI1,
                                              const ModRefInfo MRI2) {
-  return ModRefInfo(MRI1 | MRI2);
+  return ModRefInfo(static_cast<int>(MRI1) | static_cast<int>(MRI2));
 }
 LLVM_NODISCARD inline ModRefInfo intersectModRef(const ModRefInfo MRI1,
                                                  const ModRefInfo MRI2) {
-  return ModRefInfo(MRI1 & MRI2);
+  return ModRefInfo(static_cast<int>(MRI1) & static_cast<int>(MRI2));
 }
 
 /// The locations at which a function might access memory.
@@ -176,27 +182,31 @@
   /// This property corresponds to the GCC 'const' attribute.
   /// This property corresponds to the LLVM IR 'readnone' attribute.
   /// This property corresponds to the IntrNoMem LLVM intrinsic flag.
-  FMRB_DoesNotAccessMemory = FMRL_Nowhere | MRI_NoModRef,
+  FMRB_DoesNotAccessMemory =
+      FMRL_Nowhere | static_cast<int>(ModRefInfo::MRI_NoModRef),
 
   /// The only memory references in this function (if it has any) are
   /// non-volatile loads from objects pointed to by its pointer-typed
   /// arguments, with arbitrary offsets.
   ///
   /// This property corresponds to the IntrReadArgMem LLVM intrinsic flag.
-  FMRB_OnlyReadsArgumentPointees = FMRL_ArgumentPointees | MRI_Ref,
+  FMRB_OnlyReadsArgumentPointees =
+      FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::MRI_Ref),
 
   /// The only memory references in this function (if it has any) are
   /// non-volatile loads and stores from objects pointed to by its
   /// pointer-typed arguments, with arbitrary offsets.
   ///
   /// This property corresponds to the IntrArgMemOnly LLVM intrinsic flag.
-  FMRB_OnlyAccessesArgumentPointees = FMRL_ArgumentPointees | MRI_ModRef,
+  FMRB_OnlyAccessesArgumentPointees =
+      FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::MRI_ModRef),
 
   /// The only memory references in this function (if it has any) are
   /// references of memory that is otherwise inaccessible via LLVM IR.
   ///
   /// This property corresponds to the LLVM IR inaccessiblememonly attribute.
-  FMRB_OnlyAccessesInaccessibleMem = FMRL_InaccessibleMem | MRI_ModRef,
+  FMRB_OnlyAccessesInaccessibleMem =
+      FMRL_InaccessibleMem | static_cast<int>(ModRefInfo::MRI_ModRef),
 
   /// The function may perform non-volatile loads and stores of objects
   /// pointed to by its pointer-typed arguments, with arbitrary offsets, and
@@ -205,8 +215,9 @@
   ///
   /// This property corresponds to the LLVM IR
   /// inaccessiblemem_or_argmemonly attribute.
-  FMRB_OnlyAccessesInaccessibleOrArgMem = FMRL_InaccessibleMem |
-                                          FMRL_ArgumentPointees | MRI_ModRef,
+  FMRB_OnlyAccessesInaccessibleOrArgMem =
+      FMRL_InaccessibleMem | FMRL_ArgumentPointees |
+      static_cast<int>(ModRefInfo::MRI_ModRef),
 
   /// This function does not perform any non-local stores or volatile loads,
   /// but may read from any memory location.
@@ -214,18 +225,20 @@
   /// This property corresponds to the GCC 'pure' attribute.
   /// This property corresponds to the LLVM IR 'readonly' attribute.
   /// This property corresponds to the IntrReadMem LLVM intrinsic flag.
-  FMRB_OnlyReadsMemory = FMRL_Anywhere | MRI_Ref,
+  FMRB_OnlyReadsMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::MRI_Ref),
 
   // This function does not read from memory anywhere, but may write to any
   // memory location.
   //
   // This property corresponds to the LLVM IR 'writeonly' attribute.
   // This property corresponds to the IntrWriteMem LLVM intrinsic flag.
-  FMRB_DoesNotReadMemory = FMRL_Anywhere | MRI_Mod,
+  FMRB_DoesNotReadMemory =
+      FMRL_Anywhere | static_cast<int>(ModRefInfo::MRI_Mod),
 
   /// This indicates that the function could not be classified into one of the
   /// behaviors above.
-  FMRB_UnknownModRefBehavior = FMRL_Anywhere | MRI_ModRef
+  FMRB_UnknownModRefBehavior =
+      FMRL_Anywhere | static_cast<int>(ModRefInfo::MRI_ModRef)
 };
 
 // Wrapper method strips bits significant only in FunctionModRefBehavior,
@@ -234,7 +247,7 @@
 // entry with all bits set to 1.
 LLVM_NODISCARD inline ModRefInfo
 createModRefInfo(const FunctionModRefBehavior FMRB) {
-  return ModRefInfo(FMRB & MRI_ModRef);
+  return ModRefInfo(FMRB & static_cast<int>(ModRefInfo::MRI_ModRef));
 }
 
 class AAResults {
@@ -593,7 +606,7 @@
     case Instruction::CatchRet:
       return getModRefInfo((const CatchReturnInst *)I, Loc);
     default:
-      return MRI_NoModRef;
+      return ModRefInfo::MRI_NoModRef;
     }
   }
 
@@ -894,7 +907,7 @@
   }
 
   ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
-    return MRI_ModRef;
+    return ModRefInfo::MRI_ModRef;
   }
 
   FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
@@ -906,11 +919,11 @@
   }
 
   ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
-    return MRI_ModRef;
+    return ModRefInfo::MRI_ModRef;
   }
 
   ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
-    return MRI_ModRef;
+    return ModRefInfo::MRI_ModRef;
   }
 };
 
Index: lib/Analysis/AliasAnalysis.cpp
===================================================================
--- lib/Analysis/AliasAnalysis.cpp
+++ lib/Analysis/AliasAnalysis.cpp
@@ -119,7 +119,7 @@
 }
 
 ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
-  ModRefInfo Result = MRI_ModRef;
+  ModRefInfo Result = ModRefInfo::MRI_ModRef;
 
   for (const auto &AA : AAs) {
     Result = intersectModRef(Result, AA->getArgModRefInfo(CS, ArgIdx));
@@ -139,7 +139,7 @@
     return getModRefInfo(CS, Call);
   } else if (I->isFenceLike()) {
     // If this is a fence, just return MRI_ModRef.
-    return MRI_ModRef;
+    return ModRefInfo::MRI_ModRef;
   } else {
     // Otherwise, check if the call modifies or references the
     // location this memory access defines.  The best we can say
@@ -150,12 +150,12 @@
     if (isModOrRefSet(MR))
       return setModAndRef(MR);
   }
-  return MRI_NoModRef;
+  return ModRefInfo::MRI_NoModRef;
 }
 
 ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
                                     const MemoryLocation &Loc) {
-  ModRefInfo Result = MRI_ModRef;
+  ModRefInfo Result = ModRefInfo::MRI_ModRef;
 
   for (const auto &AA : AAs) {
     Result = intersectModRef(Result, AA->getModRefInfo(CS, Loc));
@@ -170,7 +170,7 @@
   auto MRB = getModRefBehavior(CS);
   if (MRB == FMRB_DoesNotAccessMemory ||
       MRB == FMRB_OnlyAccessesInaccessibleMem)
-    return MRI_NoModRef;
+    return ModRefInfo::MRI_NoModRef;
 
   if (onlyReadsMemory(MRB))
     Result = clearMod(Result);
@@ -179,7 +179,7 @@
   if (onlyAccessesArgPointees(MRB) || onlyAccessesInaccessibleOrArgMem(MRB)) {
     bool DoesAlias = false;
-    ModRefInfo AllArgsMask = MRI_NoModRef;
+    ModRefInfo AllArgsMask = ModRefInfo::MRI_NoModRef;
     if (doesAccessArgPointees(MRB)) {
       for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) {
         const Value *Arg = *AI;
@@ -197,7 +197,7 @@
     }
     // Return MRI_NoModRef if no alias found with any argument.
     if (!DoesAlias)
-      return MRI_NoModRef;
+      return ModRefInfo::MRI_NoModRef;
     // Logical & between other AA analyses and argument analysis.
     Result = intersectModRef(Result, AllArgsMask);
   }
@@ -212,7 +212,7 @@
 
 ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
                                     ImmutableCallSite CS2) {
-  ModRefInfo Result = MRI_ModRef;
+  ModRefInfo Result = ModRefInfo::MRI_ModRef;
 
   for (const auto &AA : AAs) {
     Result = intersectModRef(Result, AA->getModRefInfo(CS1, CS2));
@@ -228,15 +228,15 @@
   // If CS1 or CS2 are readnone, they don't interact.
auto CS1B = getModRefBehavior(CS1); if (CS1B == FMRB_DoesNotAccessMemory) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; auto CS2B = getModRefBehavior(CS2); if (CS2B == FMRB_DoesNotAccessMemory) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; // If they both only read from memory, there is no dependence. if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B)) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; // If CS1 only reads memory, the only dependence on CS2 can be // from CS1 reading memory written by CS2. @@ -249,7 +249,7 @@ // information from CS1's references to the memory referenced by // CS2's arguments. if (onlyAccessesArgPointees(CS2B)) { - ModRefInfo R = MRI_NoModRef; + ModRefInfo R = ModRefInfo::MRI_NoModRef; if (doesAccessArgPointees(CS2B)) { for (auto I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) { const Value *Arg = *I; @@ -263,11 +263,11 @@ // - If CS2 modifies location, dependence exists if CS1 reads or writes. // - If CS2 only reads location, dependence exists if CS1 writes. ModRefInfo ArgModRefCS2 = getArgModRefInfo(CS2, CS2ArgIdx); - ModRefInfo ArgMask = MRI_NoModRef; + ModRefInfo ArgMask = ModRefInfo::MRI_NoModRef; if (isModSet(ArgModRefCS2)) - ArgMask = MRI_ModRef; + ArgMask = ModRefInfo::MRI_ModRef; else if (isRefSet(ArgModRefCS2)) - ArgMask = MRI_Mod; + ArgMask = ModRefInfo::MRI_Mod; // ModRefCS1 indicates what CS1 might do to CS2ArgLoc, and we use // above ArgMask to update dependence info. @@ -285,7 +285,7 @@ // If CS1 only accesses memory through arguments, check if CS2 references // any of the memory referenced by CS1's arguments. If not, return NoModRef. if (onlyAccessesArgPointees(CS1B)) { - ModRefInfo R = MRI_NoModRef; + ModRefInfo R = ModRefInfo::MRI_NoModRef; if (doesAccessArgPointees(CS1B)) { for (auto I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) { const Value *Arg = *I; @@ -349,45 +349,45 @@ const MemoryLocation &Loc) { // Be conservative in the face of atomic. if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered)) - return MRI_ModRef; + return ModRefInfo::MRI_ModRef; // If the load address doesn't alias the given address, it doesn't read // or write the specified memory. if (Loc.Ptr && !alias(MemoryLocation::get(L), Loc)) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; // Otherwise, a load just reads. - return MRI_Ref; + return ModRefInfo::MRI_Ref; } ModRefInfo AAResults::getModRefInfo(const StoreInst *S, const MemoryLocation &Loc) { // Be conservative in the face of atomic. if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered)) - return MRI_ModRef; + return ModRefInfo::MRI_ModRef; if (Loc.Ptr) { // If the store address cannot alias the pointer in question, then the // specified memory cannot be modified by the store. if (!alias(MemoryLocation::get(S), Loc)) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; // If the pointer is a pointer to constant memory, then it could not have // been modified by this store. if (pointsToConstantMemory(Loc)) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; } // Otherwise, a store just writes. - return MRI_Mod; + return ModRefInfo::MRI_Mod; } ModRefInfo AAResults::getModRefInfo(const FenceInst *S, const MemoryLocation &Loc) { // If we know that the location is a constant memory location, the fence // cannot modify this location. 
if (Loc.Ptr && pointsToConstantMemory(Loc)) - return MRI_Ref; - return MRI_ModRef; + return ModRefInfo::MRI_Ref; + return ModRefInfo::MRI_ModRef; } ModRefInfo AAResults::getModRefInfo(const VAArgInst *V, @@ -396,16 +396,16 @@ // If the va_arg address cannot alias the pointer in question, then the // specified memory cannot be accessed by the va_arg. if (!alias(MemoryLocation::get(V), Loc)) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; // If the pointer is a pointer to constant memory, then it could not have // been modified by this va_arg. if (pointsToConstantMemory(Loc)) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; } // Otherwise, a va_arg reads and writes. - return MRI_ModRef; + return ModRefInfo::MRI_ModRef; } ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad, @@ -414,11 +414,11 @@ // If the pointer is a pointer to constant memory, // then it could not have been modified by this catchpad. if (pointsToConstantMemory(Loc)) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; } // Otherwise, a catchpad reads and writes. - return MRI_ModRef; + return ModRefInfo::MRI_ModRef; } ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet, @@ -427,37 +427,37 @@ // If the pointer is a pointer to constant memory, // then it could not have been modified by this catchpad. if (pointsToConstantMemory(Loc)) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; } // Otherwise, a catchret reads and writes. - return MRI_ModRef; + return ModRefInfo::MRI_ModRef; } ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX, const MemoryLocation &Loc) { // Acquire/Release cmpxchg has properties that matter for arbitrary addresses. if (isStrongerThanMonotonic(CX->getSuccessOrdering())) - return MRI_ModRef; + return ModRefInfo::MRI_ModRef; // If the cmpxchg address does not alias the location, it does not access it. if (Loc.Ptr && !alias(MemoryLocation::get(CX), Loc)) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; - return MRI_ModRef; + return ModRefInfo::MRI_ModRef; } ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc) { // Acquire/Release atomicrmw has properties that matter for arbitrary addresses. if (isStrongerThanMonotonic(RMW->getOrdering())) - return MRI_ModRef; + return ModRefInfo::MRI_ModRef; // If the atomicrmw address does not alias the location, it does not access it. 
if (Loc.Ptr && !alias(MemoryLocation::get(RMW), Loc)) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; - return MRI_ModRef; + return ModRefInfo::MRI_ModRef; } /// \brief Return information about whether a particular call site modifies @@ -473,26 +473,26 @@ DominatorTree *DT, OrderedBasicBlock *OBB) { if (!DT) - return MRI_ModRef; + return ModRefInfo::MRI_ModRef; const Value *Object = GetUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout()); if (!isIdentifiedObject(Object) || isa(Object) || isa(Object)) - return MRI_ModRef; + return ModRefInfo::MRI_ModRef; ImmutableCallSite CS(I); if (!CS.getInstruction() || CS.getInstruction() == Object) - return MRI_ModRef; + return ModRefInfo::MRI_ModRef; if (PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true, /* StoreCaptures */ true, I, DT, /* include Object */ true, /* OrderedBasicBlock */ OBB)) - return MRI_ModRef; + return ModRefInfo::MRI_ModRef; unsigned ArgNo = 0; - ModRefInfo R = MRI_NoModRef; + ModRefInfo R = ModRefInfo::MRI_NoModRef; for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end(); CI != CE; ++CI, ++ArgNo) { // Only look at the no-capture or byval pointer arguments. If this @@ -512,10 +512,10 @@ if (CS.doesNotAccessMemory(ArgNo)) continue; if (CS.onlyReadsMemory(ArgNo)) { - R = MRI_Ref; + R = ModRefInfo::MRI_Ref; continue; } - return MRI_ModRef; + return ModRefInfo::MRI_ModRef; } return R; } @@ -525,7 +525,8 @@ /// bool AAResults::canBasicBlockModify(const BasicBlock &BB, const MemoryLocation &Loc) { - return canInstructionRangeModRef(BB.front(), BB.back(), Loc, MRI_Mod); + return canInstructionRangeModRef(BB.front(), BB.back(), Loc, + ModRefInfo::MRI_Mod); } /// canInstructionRangeModRef - Return true if it is possible for the Index: lib/Analysis/AliasAnalysisEvaluator.cpp =================================================================== --- lib/Analysis/AliasAnalysisEvaluator.cpp +++ lib/Analysis/AliasAnalysisEvaluator.cpp @@ -244,20 +244,20 @@ if (ElTy->isSized()) Size = DL.getTypeStoreSize(ElTy); switch (AA.getModRefInfo(C, Pointer, Size)) { - case MRI_NoModRef: + case ModRefInfo::MRI_NoModRef: PrintModRefResults("NoModRef", PrintNoModRef, I, Pointer, F.getParent()); ++NoModRefCount; break; - case MRI_Mod: + case ModRefInfo::MRI_Mod: PrintModRefResults("Just Mod", PrintMod, I, Pointer, F.getParent()); ++ModCount; break; - case MRI_Ref: + case ModRefInfo::MRI_Ref: PrintModRefResults("Just Ref", PrintRef, I, Pointer, F.getParent()); ++RefCount; break; - case MRI_ModRef: + case ModRefInfo::MRI_ModRef: PrintModRefResults("Both ModRef", PrintModRef, I, Pointer, F.getParent()); ++ModRefCount; @@ -272,19 +272,19 @@ if (D == C) continue; switch (AA.getModRefInfo(*C, *D)) { - case MRI_NoModRef: + case ModRefInfo::MRI_NoModRef: PrintModRefResults("NoModRef", PrintNoModRef, *C, *D, F.getParent()); ++NoModRefCount; break; - case MRI_Mod: + case ModRefInfo::MRI_Mod: PrintModRefResults("Just Mod", PrintMod, *C, *D, F.getParent()); ++ModCount; break; - case MRI_Ref: + case ModRefInfo::MRI_Ref: PrintModRefResults("Just Ref", PrintRef, *C, *D, F.getParent()); ++RefCount; break; - case MRI_ModRef: + case ModRefInfo::MRI_ModRef: PrintModRefResults("Both ModRef", PrintModRef, *C, *D, F.getParent()); ++ModRefCount; break; Index: lib/Analysis/BasicAliasAnalysis.cpp =================================================================== --- lib/Analysis/BasicAliasAnalysis.cpp +++ lib/Analysis/BasicAliasAnalysis.cpp @@ -687,13 +687,13 @@ unsigned ArgIdx) { // Checking for known builtin intrinsics and target library 
functions. if (isWriteOnlyParam(CS, ArgIdx, TLI)) - return MRI_Mod; + return ModRefInfo::MRI_Mod; if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly)) - return MRI_Ref; + return ModRefInfo::MRI_Ref; if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone)) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; return AAResultBase::getArgModRefInfo(CS, ArgIdx); } @@ -770,7 +770,7 @@ if (isa(Object)) if (const CallInst *CI = dyn_cast(CS.getInstruction())) if (CI->isTailCall()) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; // If the pointer is to a locally allocated object that does not escape, // then the call can not mod/ref the pointer unless the call takes the pointer @@ -780,7 +780,7 @@ // Optimistically assume that call doesn't touch Object and check this // assumption in the following loop. - ModRefInfo Result = MRI_NoModRef; + ModRefInfo Result = ModRefInfo::MRI_NoModRef; unsigned OperandNo = 0; for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end(); @@ -818,7 +818,7 @@ continue; } // This operand aliases 'Object' and call reads and writes into it. - Result = MRI_ModRef; + Result = ModRefInfo::MRI_ModRef; break; } @@ -838,7 +838,7 @@ // Be conservative if the accessed pointer may alias the allocation - // fallback to the generic handling below. if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; } // The semantics of memcpy intrinsics forbid overlap between their respective @@ -851,14 +851,14 @@ if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc)) == MustAlias) // Loc is exactly the memcpy source thus disjoint from memcpy dest. - return MRI_Ref; + return ModRefInfo::MRI_Ref; if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc)) == MustAlias) // The converse case. - return MRI_Mod; + return ModRefInfo::MRI_Mod; // It's also possible for Loc to alias both src and dest, or neither. - ModRefInfo rv = MRI_NoModRef; + ModRefInfo rv = ModRefInfo::MRI_NoModRef; if (SrcAA != NoAlias) rv = setRef(rv); if (DestAA != NoAlias) @@ -870,7 +870,7 @@ // proper control dependencies will be maintained, it never aliases any // particular memory location. if (isIntrinsicCall(CS, Intrinsic::assume)) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; // Like assumes, guard intrinsics are also marked as arbitrarily writing so // that proper control dependencies are maintained but they never mods any @@ -880,7 +880,7 @@ // heap state at the point the guard is issued needs to be consistent in case // the guard invokes the "deopt" continuation. if (isIntrinsicCall(CS, Intrinsic::experimental_guard)) - return MRI_Ref; + return ModRefInfo::MRI_Ref; // Like assumes, invariant.start intrinsics were also marked as arbitrarily // writing so that proper control dependencies are maintained but they never @@ -906,7 +906,7 @@ // rules of invariant.start) and print 40, while the first program always // prints 50. if (isIntrinsicCall(CS, Intrinsic::invariant_start)) - return MRI_Ref; + return ModRefInfo::MRI_Ref; // The AAResultBase base class has some smarts, lets use them. return AAResultBase::getModRefInfo(CS, Loc); @@ -919,7 +919,7 @@ // particular memory location. 
   if (isIntrinsicCall(CS1, Intrinsic::assume) ||
       isIntrinsicCall(CS2, Intrinsic::assume))
-    return MRI_NoModRef;
+    return ModRefInfo::MRI_NoModRef;
 
   // Like assumes, guard intrinsics are also marked as arbitrarily writing so
   // that proper control dependencies are maintained but they never mod any
@@ -933,12 +933,14 @@
   // possibilities for guard intrinsics.
 
   if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
-    return isModSet(createModRefInfo(getModRefBehavior(CS2))) ? MRI_Ref
-                                                              : MRI_NoModRef;
+    return isModSet(createModRefInfo(getModRefBehavior(CS2)))
+               ? ModRefInfo::MRI_Ref
+               : ModRefInfo::MRI_NoModRef;
 
   if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
-    return isModSet(createModRefInfo(getModRefBehavior(CS1))) ? MRI_Mod
-                                                              : MRI_NoModRef;
+    return isModSet(createModRefInfo(getModRefBehavior(CS1)))
+               ? ModRefInfo::MRI_Mod
+               : ModRefInfo::MRI_NoModRef;
 
   // The AAResultBase base class has some smarts, lets use them.
   return AAResultBase::getModRefInfo(CS1, CS2);
Index: lib/Analysis/GlobalsModRef.cpp
===================================================================
--- lib/Analysis/GlobalsModRef.cpp
+++ lib/Analysis/GlobalsModRef.cpp
@@ -88,9 +88,11 @@
   enum { MayReadAnyGlobal = 4 };
 
   /// Checks to document the invariants of the bit packing here.
-  static_assert((MayReadAnyGlobal & MRI_ModRef) == 0,
+  static_assert((MayReadAnyGlobal & static_cast<int>(ModRefInfo::MRI_ModRef)) ==
+                    0,
                 "ModRef and the MayReadAnyGlobal flag bits overlap.");
-  static_assert(((MayReadAnyGlobal | MRI_ModRef) >>
+  static_assert(((MayReadAnyGlobal |
+                  static_cast<int>(ModRefInfo::MRI_ModRef)) >>
                  AlignedMapPointerTraits::NumLowBitsAvailable) == 0,
                 "Insufficient low bits to store our flag and ModRef info.");
@@ -127,12 +129,12 @@
 
   /// Returns the \c ModRefInfo info for this function.
   ModRefInfo getModRefInfo() const {
-    return ModRefInfo(Info.getInt() & MRI_ModRef);
+    return ModRefInfo(Info.getInt() & static_cast<int>(ModRefInfo::MRI_ModRef));
   }
 
   /// Adds new \c ModRefInfo for this function to its state.
   void addModRefInfo(ModRefInfo NewMRI) {
-    Info.setInt(Info.getInt() | NewMRI);
+    Info.setInt(Info.getInt() | static_cast<int>(NewMRI));
   }
 
   /// Returns whether this function may read any global variable, and we don't
@@ -145,7 +147,8 @@
   /// Returns the \c ModRefInfo info for this function w.r.t. a particular
   /// global, which may be more precise than the general information above.
   ModRefInfo getModRefInfoForGlobal(const GlobalValue &GV) const {
-    ModRefInfo GlobalMRI = mayReadAnyGlobal() ? MRI_Ref : MRI_NoModRef;
+    ModRefInfo GlobalMRI =
+        mayReadAnyGlobal() ? ModRefInfo::MRI_Ref : ModRefInfo::MRI_NoModRef;
     if (AlignedMap *P = Info.getPointer()) {
       auto I = P->Map.find(&GV);
       if (I != P->Map.end())
@@ -298,7 +301,7 @@
           Handles.emplace_front(*this, Reader);
           Handles.front().I = Handles.begin();
         }
-        FunctionInfos[Reader].addModRefInfoForGlobal(GV, MRI_Ref);
+        FunctionInfos[Reader].addModRefInfoForGlobal(GV, ModRefInfo::MRI_Ref);
       }
 
       if (!GV.isConstant()) // No need to keep track of writers to constants
@@ -307,7 +310,8 @@
             Handles.emplace_front(*this, Writer);
             Handles.front().I = Handles.begin();
           }
-          FunctionInfos[Writer].addModRefInfoForGlobal(GV, MRI_Mod);
+          FunctionInfos[Writer].addModRefInfoForGlobal(GV,
+                                                       ModRefInfo::MRI_Mod);
         }
       ++NumNonAddrTakenGlobalVars;
@@ -503,13 +507,13 @@
     if (F->doesNotAccessMemory()) {
       // Can't do better than that!
} else if (F->onlyReadsMemory()) { - FI.addModRefInfo(MRI_Ref); + FI.addModRefInfo(ModRefInfo::MRI_Ref); if (!F->isIntrinsic() && !F->onlyAccessesArgMemory()) // This function might call back into the module and read a global - // consider every global as possibly being read by this function. FI.setMayReadAnyGlobal(); } else { - FI.addModRefInfo(MRI_ModRef); + FI.addModRefInfo(ModRefInfo::MRI_ModRef); // Can't say anything useful unless it's an intrinsic - they don't // read or write global variables of the kind considered here. KnowNothing = !F->isIntrinsic(); @@ -564,7 +568,7 @@ if (isAllocationFn(&I, &TLI) || isFreeCall(&I, &TLI)) { // FIXME: It is completely unclear why this is necessary and not // handled by the above graph code. - FI.addModRefInfo(MRI_ModRef); + FI.addModRefInfo(ModRefInfo::MRI_ModRef); } else if (Function *Callee = CS.getCalledFunction()) { // The callgraph doesn't include intrinsic calls. if (Callee->isIntrinsic()) { @@ -579,9 +583,9 @@ // All non-call instructions we use the primary predicates for whether // thay read or write memory. if (I.mayReadFromMemory()) - FI.addModRefInfo(MRI_Ref); + FI.addModRefInfo(ModRefInfo::MRI_Ref); if (I.mayWriteToMemory()) - FI.addModRefInfo(MRI_Mod); + FI.addModRefInfo(ModRefInfo::MRI_Mod); } } @@ -868,8 +872,9 @@ ModRefInfo GlobalsAAResult::getModRefInfoForArgument(ImmutableCallSite CS, const GlobalValue *GV) { if (CS.doesNotAccessMemory()) - return MRI_NoModRef; - ModRefInfo ConservativeResult = CS.onlyReadsMemory() ? MRI_Ref : MRI_ModRef; + return ModRefInfo::MRI_NoModRef; + ModRefInfo ConservativeResult = + CS.onlyReadsMemory() ? ModRefInfo::MRI_Ref : ModRefInfo::MRI_ModRef; // Iterate through all the arguments to the called function. If any argument // is based on GV, return the conservative result. @@ -890,12 +895,12 @@ } // We identified all objects in the argument list, and none of them were GV. - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; } ModRefInfo GlobalsAAResult::getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) { - ModRefInfo Known = MRI_ModRef; + ModRefInfo Known = ModRefInfo::MRI_ModRef; // If we are asking for mod/ref info of a direct call with a pointer to a // global we are tracking, return information if we have it. 
@@ -909,7 +914,7 @@
                              getModRefInfoForArgument(CS, GV));
 
   if (!isModOrRefSet(Known))
-    return MRI_NoModRef; // No need to query other mod/ref analyses
+    return ModRefInfo::MRI_NoModRef; // No need to query other mod/ref analyses
 
   return intersectModRef(Known, AAResultBase::getModRefInfo(CS, Loc));
 }
Index: lib/Analysis/MemoryDependenceAnalysis.cpp
===================================================================
--- lib/Analysis/MemoryDependenceAnalysis.cpp
+++ lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -119,38 +119,38 @@
   if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
     if (LI->isUnordered()) {
       Loc = MemoryLocation::get(LI);
-      return MRI_Ref;
+      return ModRefInfo::MRI_Ref;
     }
     if (LI->getOrdering() == AtomicOrdering::Monotonic) {
       Loc = MemoryLocation::get(LI);
-      return MRI_ModRef;
+      return ModRefInfo::MRI_ModRef;
     }
     Loc = MemoryLocation();
-    return MRI_ModRef;
+    return ModRefInfo::MRI_ModRef;
   }
 
   if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
     if (SI->isUnordered()) {
       Loc = MemoryLocation::get(SI);
-      return MRI_Mod;
+      return ModRefInfo::MRI_Mod;
     }
     if (SI->getOrdering() == AtomicOrdering::Monotonic) {
       Loc = MemoryLocation::get(SI);
-      return MRI_ModRef;
+      return ModRefInfo::MRI_ModRef;
     }
     Loc = MemoryLocation();
-    return MRI_ModRef;
+    return ModRefInfo::MRI_ModRef;
   }
 
   if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
     Loc = MemoryLocation::get(V);
-    return MRI_ModRef;
+    return ModRefInfo::MRI_ModRef;
   }
 
   if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
     // calls to free() deallocate the entire structure
     Loc = MemoryLocation(CI->getArgOperand(0));
-    return MRI_Mod;
+    return ModRefInfo::MRI_Mod;
   }
 
   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
@@ -166,7 +166,7 @@
           cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AAInfo);
       // These intrinsics don't really modify the memory, but returning Mod
       // will allow them to be handled conservatively.
-      return MRI_Mod;
+      return ModRefInfo::MRI_Mod;
     case Intrinsic::invariant_end:
       II->getAAMetadata(AAInfo);
       Loc = MemoryLocation(
@@ -174,7 +174,7 @@
           cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AAInfo);
       // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
-      return MRI_Mod;
+      return ModRefInfo::MRI_Mod;
     default:
       break;
     }
@@ -182,10 +182,10 @@
 
   // Otherwise, just do the coarse-grained thing that always works.
   if (Inst->mayWriteToMemory())
-    return MRI_ModRef;
+    return ModRefInfo::MRI_ModRef;
   if (Inst->mayReadFromMemory())
-    return MRI_Ref;
-  return MRI_NoModRef;
+    return ModRefInfo::MRI_Ref;
+  return ModRefInfo::MRI_NoModRef;
 }
 
 /// Private helper for finding the local dependencies of a call site.
@@ -689,12 +689,12 @@
     if (isModAndRefSet(MR))
       MR = AA.callCapturesBefore(Inst, MemLoc, &DT, &OBB);
     switch (MR) {
-    case MRI_NoModRef:
+    case ModRefInfo::MRI_NoModRef:
       // If the call has no effect on the queried pointer, just ignore it.
       continue;
-    case MRI_Mod:
+    case ModRefInfo::MRI_Mod:
       return MemDepResult::getClobber(Inst);
-    case MRI_Ref:
+    case ModRefInfo::MRI_Ref:
       // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
       if (isLoad)
Index: lib/Analysis/ObjCARCAliasAnalysis.cpp
===================================================================
--- lib/Analysis/ObjCARCAliasAnalysis.cpp
+++ lib/Analysis/ObjCARCAliasAnalysis.cpp
@@ -123,7 +123,7 @@
     // These functions don't access any memory visible to the compiler.
     // Note that this doesn't include objc_retainBlock, because it updates
     // pointers when it copies block data.
- return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; default: break; } Index: lib/Analysis/ScopedNoAliasAA.cpp =================================================================== --- lib/Analysis/ScopedNoAliasAA.cpp +++ lib/Analysis/ScopedNoAliasAA.cpp @@ -102,12 +102,12 @@ if (!mayAliasInScopes(Loc.AATags.Scope, CS.getInstruction()->getMetadata( LLVMContext::MD_noalias))) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; if (!mayAliasInScopes( CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope), Loc.AATags.NoAlias)) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; return AAResultBase::getModRefInfo(CS, Loc); } @@ -120,12 +120,12 @@ if (!mayAliasInScopes( CS1.getInstruction()->getMetadata(LLVMContext::MD_alias_scope), CS2.getInstruction()->getMetadata(LLVMContext::MD_noalias))) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; if (!mayAliasInScopes( CS2.getInstruction()->getMetadata(LLVMContext::MD_alias_scope), CS1.getInstruction()->getMetadata(LLVMContext::MD_noalias))) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; return AAResultBase::getModRefInfo(CS1, CS2); } Index: lib/Analysis/TypeBasedAliasAnalysis.cpp =================================================================== --- lib/Analysis/TypeBasedAliasAnalysis.cpp +++ lib/Analysis/TypeBasedAliasAnalysis.cpp @@ -371,7 +371,7 @@ if (const MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa)) if (!Aliases(L, M)) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; return AAResultBase::getModRefInfo(CS, Loc); } @@ -386,7 +386,7 @@ if (const MDNode *M2 = CS2.getInstruction()->getMetadata(LLVMContext::MD_tbaa)) if (!Aliases(M1, M2)) - return MRI_NoModRef; + return ModRefInfo::MRI_NoModRef; return AAResultBase::getModRefInfo(CS1, CS2); } Index: lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp =================================================================== --- lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp +++ lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp @@ -2009,12 +2009,12 @@ SmallPtrSet Ignore1; Ignore1.insert(SI); - if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount, - StoreSize, *AA, Ignore1)) { + if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::MRI_ModRef, CurLoop, + BECount, StoreSize, *AA, Ignore1)) { // Check if the load is the offending instruction. Ignore1.insert(LI); - if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount, - StoreSize, *AA, Ignore1)) { + if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::MRI_ModRef, CurLoop, + BECount, StoreSize, *AA, Ignore1)) { // Still bad. Nothing we can do. goto CleanupAndExit; } @@ -2056,8 +2056,8 @@ SmallPtrSet Ignore2; Ignore2.insert(SI); - if (mayLoopAccessLocation(LoadBasePtr, MRI_Mod, CurLoop, BECount, StoreSize, - *AA, Ignore2)) + if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::MRI_Mod, CurLoop, BECount, + StoreSize, *AA, Ignore2)) goto CleanupAndExit; // Check the stride. Index: lib/Transforms/IPO/ArgumentPromotion.cpp =================================================================== --- lib/Transforms/IPO/ArgumentPromotion.cpp +++ lib/Transforms/IPO/ArgumentPromotion.cpp @@ -719,7 +719,8 @@ BasicBlock *BB = Load->getParent(); MemoryLocation Loc = MemoryLocation::get(Load); - if (AAR.canInstructionRangeModRef(BB->front(), *Load, Loc, MRI_Mod)) + if (AAR.canInstructionRangeModRef(BB->front(), *Load, Loc, + ModRefInfo::MRI_Mod)) return false; // Pointer is invalidated! // Now check every path from the entry block to the load for transparency. 
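
The transform-side hunks above and below (ArgumentPromotion, MergedLoadStoreMotion, the two loop-idiom passes) only swap bare MRI_* constants for their ModRefInfo:: spellings at query call sites such as canInstructionRangeModRef and mayLoopAccessLocation; the bit tests themselves stay behind the header's wrapper helpers. A minimal standalone sketch (not LLVM code; the mayClobber query is invented for illustration) of why the scoped enum forces call sites through those helpers:

// mini_modref.cpp - standalone sketch mirroring the helper-wrapper idiom.
#include <cassert>

enum class ModRefInfo {
  MRI_NoModRef = 0,
  MRI_Ref = 1,
  MRI_Mod = 2,
  MRI_ModRef = MRI_Ref | MRI_Mod, // enumerators are still ints inside the body
};

inline bool isModSet(ModRefInfo MRI) {
  return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MRI_Mod);
}
inline ModRefInfo unionModRef(ModRefInfo A, ModRefInfo B) {
  return ModRefInfo(static_cast<int>(A) | static_cast<int>(B));
}

// Hypothetical query: may this access clobber the location?
bool mayClobber(ModRefInfo Access) {
  // With the old plain enum, `Access & MRI_Mod` compiled silently; with the
  // scoped enum it does not, so the named helper is the only easy spelling.
  return isModSet(Access);
}

int main() {
  ModRefInfo R = unionModRef(ModRefInfo::MRI_Ref, ModRefInfo::MRI_Mod);
  assert(R == ModRefInfo::MRI_ModRef);
  assert(mayClobber(R) && !mayClobber(ModRefInfo::MRI_Ref));
  return 0;
}
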
Index: lib/Transforms/Scalar/LoopIdiomRecognize.cpp =================================================================== --- lib/Transforms/Scalar/LoopIdiomRecognize.cpp +++ lib/Transforms/Scalar/LoopIdiomRecognize.cpp @@ -887,8 +887,8 @@ // base pointer and checking the region. Value *BasePtr = Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator()); - if (mayLoopAccessLocation(BasePtr, MRI_ModRef, CurLoop, BECount, StoreSize, - *AA, Stores)) { + if (mayLoopAccessLocation(BasePtr, ModRefInfo::MRI_ModRef, CurLoop, BECount, + StoreSize, *AA, Stores)) { Expander.clear(); // If we generated new code for the base pointer, clean up. RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI); @@ -997,8 +997,8 @@ SmallPtrSet Stores; Stores.insert(SI); - if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount, - StoreSize, *AA, Stores)) { + if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::MRI_ModRef, CurLoop, + BECount, StoreSize, *AA, Stores)) { Expander.clear(); // If we generated new code for the base pointer, clean up. RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI); @@ -1017,8 +1017,8 @@ Value *LoadBasePtr = Expander.expandCodeFor( LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator()); - if (mayLoopAccessLocation(LoadBasePtr, MRI_Mod, CurLoop, BECount, StoreSize, - *AA, Stores)) { + if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::MRI_Mod, CurLoop, BECount, + StoreSize, *AA, Stores)) { Expander.clear(); // If we generated new code for the base pointer, clean up. RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI); Index: lib/Transforms/Scalar/MergedLoadStoreMotion.cpp =================================================================== --- lib/Transforms/Scalar/MergedLoadStoreMotion.cpp +++ lib/Transforms/Scalar/MergedLoadStoreMotion.cpp @@ -195,7 +195,7 @@ make_range(Start.getIterator(), End.getIterator())) if (Inst.mayThrow()) return true; - return AA->canInstructionRangeModRef(Start, End, Loc, MRI_ModRef); + return AA->canInstructionRangeModRef(Start, End, Loc, ModRefInfo::MRI_ModRef); } /// Index: unittests/Analysis/AliasAnalysisTest.cpp =================================================================== --- unittests/Analysis/AliasAnalysisTest.cpp +++ unittests/Analysis/AliasAnalysisTest.cpp @@ -191,18 +191,20 @@ auto &AA = getAAResults(*F); // Check basic results - EXPECT_EQ(AA.getModRefInfo(Store1, MemoryLocation()), MRI_Mod); - EXPECT_EQ(AA.getModRefInfo(Store1, None), MRI_Mod); - EXPECT_EQ(AA.getModRefInfo(Load1, MemoryLocation()), MRI_Ref); - EXPECT_EQ(AA.getModRefInfo(Load1, None), MRI_Ref); - EXPECT_EQ(AA.getModRefInfo(Add1, MemoryLocation()), MRI_NoModRef); - EXPECT_EQ(AA.getModRefInfo(Add1, None), MRI_NoModRef); - EXPECT_EQ(AA.getModRefInfo(VAArg1, MemoryLocation()), MRI_ModRef); - EXPECT_EQ(AA.getModRefInfo(VAArg1, None), MRI_ModRef); - EXPECT_EQ(AA.getModRefInfo(CmpXChg1, MemoryLocation()), MRI_ModRef); - EXPECT_EQ(AA.getModRefInfo(CmpXChg1, None), MRI_ModRef); - EXPECT_EQ(AA.getModRefInfo(AtomicRMW, MemoryLocation()), MRI_ModRef); - EXPECT_EQ(AA.getModRefInfo(AtomicRMW, None), MRI_ModRef); + EXPECT_EQ(AA.getModRefInfo(Store1, MemoryLocation()), ModRefInfo::MRI_Mod); + EXPECT_EQ(AA.getModRefInfo(Store1, None), ModRefInfo::MRI_Mod); + EXPECT_EQ(AA.getModRefInfo(Load1, MemoryLocation()), ModRefInfo::MRI_Ref); + EXPECT_EQ(AA.getModRefInfo(Load1, None), ModRefInfo::MRI_Ref); + EXPECT_EQ(AA.getModRefInfo(Add1, MemoryLocation()), ModRefInfo::MRI_NoModRef); + EXPECT_EQ(AA.getModRefInfo(Add1, None), 
ModRefInfo::MRI_NoModRef); + EXPECT_EQ(AA.getModRefInfo(VAArg1, MemoryLocation()), ModRefInfo::MRI_ModRef); + EXPECT_EQ(AA.getModRefInfo(VAArg1, None), ModRefInfo::MRI_ModRef); + EXPECT_EQ(AA.getModRefInfo(CmpXChg1, MemoryLocation()), + ModRefInfo::MRI_ModRef); + EXPECT_EQ(AA.getModRefInfo(CmpXChg1, None), ModRefInfo::MRI_ModRef); + EXPECT_EQ(AA.getModRefInfo(AtomicRMW, MemoryLocation()), + ModRefInfo::MRI_ModRef); + EXPECT_EQ(AA.getModRefInfo(AtomicRMW, None), ModRefInfo::MRI_ModRef); } class AAPassInfraTest : public testing::Test {
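
The FunctionModRefBehavior hunks read more easily once the bit layout is spelled out: the low two bits of each FMRB_* value carry a ModRefInfo, the higher bits say where the access may happen, which is why the enumerator initializers now need static_cast<int> and why createModRefInfo masks with MRI_ModRef. A standalone sketch of that layout, not LLVM code, with simplified FMRL_* values that are assumptions rather than the real constants:

// fmrb_layout.cpp - sketch of the location/mod-ref bit packing.
#include <cassert>

enum class ModRefInfo { MRI_NoModRef = 0, MRI_Ref = 1, MRI_Mod = 2, MRI_ModRef = 3 };

enum FunctionModRefLocation {
  FMRL_Nowhere = 0,           // simplified values; the real constants differ
  FMRL_ArgumentPointees = 4,
  FMRL_InaccessibleMem = 8,
  FMRL_Anywhere = 16 | FMRL_InaccessibleMem | FMRL_ArgumentPointees,
};

enum FunctionModRefBehavior {
  FMRB_DoesNotAccessMemory =
      FMRL_Nowhere | static_cast<int>(ModRefInfo::MRI_NoModRef),
  FMRB_OnlyReadsMemory =
      FMRL_Anywhere | static_cast<int>(ModRefInfo::MRI_Ref),
  FMRB_UnknownModRefBehavior =
      FMRL_Anywhere | static_cast<int>(ModRefInfo::MRI_ModRef),
};

// Same stripping the patch performs in createModRefInfo(): keep only the low
// mod/ref bits and rebuild the scoped enum value from them.
ModRefInfo createModRefInfo(FunctionModRefBehavior FMRB) {
  return ModRefInfo(FMRB & static_cast<int>(ModRefInfo::MRI_ModRef));
}

int main() {
  assert(createModRefInfo(FMRB_OnlyReadsMemory) == ModRefInfo::MRI_Ref);
  assert(createModRefInfo(FMRB_DoesNotAccessMemory) == ModRefInfo::MRI_NoModRef);
  assert(createModRefInfo(FMRB_UnknownModRefBehavior) == ModRefInfo::MRI_ModRef);
  return 0;
}
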
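Several AAResults::getModRefInfo overloads in the AliasAnalysis.cpp hunks share one shape: start from the conservative ModRefInfo::MRI_ModRef, intersect in each registered analysis' answer, and stop early once nothing is left set. A standalone sketch of that refinement loop, not LLVM code, with stub lambdas standing in for the real analyses:

// refine_loop.cpp - sketch of the conservative-then-intersect query pattern.
#include <cassert>
#include <functional>
#include <vector>

enum class ModRefInfo { MRI_NoModRef = 0, MRI_Ref = 1, MRI_Mod = 2, MRI_ModRef = 3 };

// Simplified predicates; the real helpers mask with MRI_ModRef.
inline bool isModOrRefSet(ModRefInfo MRI) { return static_cast<int>(MRI) != 0; }
inline ModRefInfo intersectModRef(ModRefInfo A, ModRefInfo B) {
  return ModRefInfo(static_cast<int>(A) & static_cast<int>(B));
}

ModRefInfo queryAll(const std::vector<std::function<ModRefInfo()>> &AAs) {
  ModRefInfo Result = ModRefInfo::MRI_ModRef; // conservative default
  for (const auto &AA : AAs) {
    Result = intersectModRef(Result, AA());
    if (!isModOrRefSet(Result)) // early exit: already proven NoModRef
      break;
  }
  return Result;
}

int main() {
  auto SaysRef = [] { return ModRefInfo::MRI_Ref; };
  auto SaysMod = [] { return ModRefInfo::MRI_Mod; };
  // One analysis proves "only reads", another proves "only writes": the
  // intersection is NoModRef, mirroring the early-return paths in the patch.
  assert(queryAll({SaysRef, SaysMod}) == ModRefInfo::MRI_NoModRef);
  assert(queryAll({SaysRef}) == ModRefInfo::MRI_Ref);
  return 0;
}
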
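The GlobalsModRef hunk reworks two static_asserts because FunctionInfo packs the per-function mod/ref bits together with a MayReadAnyGlobal flag into the integer half of a PointerIntPair; the asserts guarantee the flag and the mod/ref mask never collide. A standalone sketch of that invariant, not LLVM code, with a plain unsigned standing in for the PointerIntPair payload:

// bitpack_invariant.cpp - sketch of the flag/mod-ref packing checked above.
#include <cassert>

enum class ModRefInfo { MRI_NoModRef = 0, MRI_Ref = 1, MRI_Mod = 2, MRI_ModRef = 3 };
enum { MayReadAnyGlobal = 4 };

static_assert((MayReadAnyGlobal & static_cast<int>(ModRefInfo::MRI_ModRef)) == 0,
              "ModRef and the MayReadAnyGlobal flag bits overlap.");

struct FunctionInfoBits {
  unsigned Packed = 0; // stands in for the PointerIntPair's integer payload

  void addModRefInfo(ModRefInfo NewMRI) { Packed |= static_cast<int>(NewMRI); }
  void setMayReadAnyGlobal() { Packed |= MayReadAnyGlobal; }

  ModRefInfo getModRefInfo() const {
    return ModRefInfo(Packed & static_cast<int>(ModRefInfo::MRI_ModRef));
  }
  bool mayReadAnyGlobal() const { return Packed & MayReadAnyGlobal; }
};

int main() {
  FunctionInfoBits FI;
  FI.addModRefInfo(ModRefInfo::MRI_Ref);
  FI.setMayReadAnyGlobal();
  // The flag does not leak into the mod/ref summary, and vice versa.
  assert(FI.getModRefInfo() == ModRefInfo::MRI_Ref);
  assert(FI.mayReadAnyGlobal());
  return 0;
}
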
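Much of the remaining churn (AliasAnalysisEvaluator.cpp, the MemoryDependenceAnalysis switch, the unit test) is mechanical: with a scoped enum, case labels and comparisons must spell out ModRefInfo:: explicitly. A small standalone sketch of that pattern; the counters are illustrative only, not the evaluator's real statistics:

// switch_churn.cpp - sketch of the qualified case labels the change requires.
#include <cstdio>

enum class ModRefInfo { MRI_NoModRef = 0, MRI_Ref = 1, MRI_Mod = 2, MRI_ModRef = 3 };

void count(ModRefInfo MR, unsigned Counts[4]) {
  switch (MR) {
  case ModRefInfo::MRI_NoModRef: ++Counts[0]; break; // was `case MRI_NoModRef:`
  case ModRefInfo::MRI_Ref:      ++Counts[1]; break;
  case ModRefInfo::MRI_Mod:      ++Counts[2]; break;
  case ModRefInfo::MRI_ModRef:   ++Counts[3]; break;
  }
}

int main() {
  unsigned Counts[4] = {0, 0, 0, 0};
  const ModRefInfo Seen[] = {ModRefInfo::MRI_Ref, ModRefInfo::MRI_Mod,
                             ModRefInfo::MRI_Ref};
  for (ModRefInfo MR : Seen)
    count(MR, Counts);
  std::printf("NoModRef=%u Ref=%u Mod=%u ModRef=%u\n",
              Counts[0], Counts[1], Counts[2], Counts[3]);
  return 0;
}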