Index: llvm/trunk/include/llvm/Analysis/AliasAnalysis.h =================================================================== --- llvm/trunk/include/llvm/Analysis/AliasAnalysis.h +++ llvm/trunk/include/llvm/Analysis/AliasAnalysis.h @@ -98,55 +98,57 @@ /// they form a two bit matrix and bit-tests for 'mod' or 'ref' /// work with any of the possible values. -enum ModRefInfo { +enum class ModRefInfo { /// The access neither references nor modifies the value stored in memory. - MRI_NoModRef = 0, + NoModRef = 0, /// The access may reference the value stored in memory. - MRI_Ref = 1, + Ref = 1, /// The access may modify the value stored in memory. - MRI_Mod = 2, + Mod = 2, /// The access may reference and may modify the value stored in memory. - MRI_ModRef = MRI_Ref | MRI_Mod, + ModRef = Ref | Mod, }; LLVM_NODISCARD inline bool isNoModRef(const ModRefInfo MRI) { - return MRI == MRI_NoModRef; + return MRI == ModRefInfo::NoModRef; } LLVM_NODISCARD inline bool isModOrRefSet(const ModRefInfo MRI) { - return MRI & MRI_ModRef; + return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::ModRef); } LLVM_NODISCARD inline bool isModAndRefSet(const ModRefInfo MRI) { - return (MRI & MRI_ModRef) == MRI_ModRef; + return (static_cast<int>(MRI) & static_cast<int>(ModRefInfo::ModRef)) == + static_cast<int>(ModRefInfo::ModRef); } LLVM_NODISCARD inline bool isModSet(const ModRefInfo MRI) { - return MRI & MRI_Mod; + return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Mod); } LLVM_NODISCARD inline bool isRefSet(const ModRefInfo MRI) { - return MRI & MRI_Ref; + return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Ref); } -LLVM_NODISCARD inline ModRefInfo setRef(const ModRefInfo MRI) { - return ModRefInfo(MRI | MRI_Ref); -} LLVM_NODISCARD inline ModRefInfo setMod(const ModRefInfo MRI) { - return ModRefInfo(MRI | MRI_Mod); + return ModRefInfo(static_cast<int>(MRI) | static_cast<int>(ModRefInfo::Mod)); +} +LLVM_NODISCARD inline ModRefInfo setRef(const ModRefInfo MRI) { + return ModRefInfo(static_cast<int>(MRI) | 
static_cast<int>(ModRefInfo::Ref)); } LLVM_NODISCARD inline ModRefInfo setModAndRef(const ModRefInfo MRI) { - return ModRefInfo(MRI | MRI_ModRef); + return ModRefInfo(static_cast<int>(MRI) | + static_cast<int>(ModRefInfo::ModRef)); } LLVM_NODISCARD inline ModRefInfo clearMod(const ModRefInfo MRI) { - return ModRefInfo(MRI & MRI_Ref); + return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Ref)); } LLVM_NODISCARD inline ModRefInfo clearRef(const ModRefInfo MRI) { - return ModRefInfo(MRI & MRI_Mod); + return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Mod)); } LLVM_NODISCARD inline ModRefInfo unionModRef(const ModRefInfo MRI1, const ModRefInfo MRI2) { - return ModRefInfo(MRI1 | MRI2); + return ModRefInfo(static_cast<int>(MRI1) | static_cast<int>(MRI2)); } LLVM_NODISCARD inline ModRefInfo intersectModRef(const ModRefInfo MRI1, const ModRefInfo MRI2) { - return ModRefInfo(MRI1 & MRI2); + return ModRefInfo(static_cast<int>(MRI1) & static_cast<int>(MRI2)); } /// The locations at which a function might access memory. @@ -176,27 +178,31 @@ /// This property corresponds to the GCC 'const' attribute. /// This property corresponds to the LLVM IR 'readnone' attribute. /// This property corresponds to the IntrNoMem LLVM intrinsic flag. - FMRB_DoesNotAccessMemory = FMRL_Nowhere | MRI_NoModRef, + FMRB_DoesNotAccessMemory = + FMRL_Nowhere | static_cast<int>(ModRefInfo::NoModRef), /// The only memory references in this function (if it has any) are /// non-volatile loads from objects pointed to by its pointer-typed /// arguments, with arbitrary offsets. /// /// This property corresponds to the IntrReadArgMem LLVM intrinsic flag. - FMRB_OnlyReadsArgumentPointees = FMRL_ArgumentPointees | MRI_Ref, + FMRB_OnlyReadsArgumentPointees = + FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::Ref), /// The only memory references in this function (if it has any) are /// non-volatile loads and stores from objects pointed to by its /// pointer-typed arguments, with arbitrary offsets. 
/// /// This property corresponds to the IntrArgMemOnly LLVM intrinsic flag. - FMRB_OnlyAccessesArgumentPointees = FMRL_ArgumentPointees | MRI_ModRef, + FMRB_OnlyAccessesArgumentPointees = + FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::ModRef), /// The only memory references in this function (if it has any) are /// references of memory that is otherwise inaccessible via LLVM IR. /// /// This property corresponds to the LLVM IR inaccessiblememonly attribute. - FMRB_OnlyAccessesInaccessibleMem = FMRL_InaccessibleMem | MRI_ModRef, + FMRB_OnlyAccessesInaccessibleMem = + FMRL_InaccessibleMem | static_cast<int>(ModRefInfo::ModRef), /// The function may perform non-volatile loads and stores of objects /// pointed to by its pointer-typed arguments, with arbitrary offsets, and @@ -206,7 +212,8 @@ /// This property corresponds to the LLVM IR /// inaccessiblemem_or_argmemonly attribute. FMRB_OnlyAccessesInaccessibleOrArgMem = FMRL_InaccessibleMem | - FMRL_ArgumentPointees | MRI_ModRef, + FMRL_ArgumentPointees | + static_cast<int>(ModRefInfo::ModRef), /// This function does not perform any non-local stores or volatile loads, /// but may read from any memory location. @@ -214,18 +221,19 @@ /// This property corresponds to the GCC 'pure' attribute. /// This property corresponds to the LLVM IR 'readonly' attribute. /// This property corresponds to the IntrReadMem LLVM intrinsic flag. - FMRB_OnlyReadsMemory = FMRL_Anywhere | MRI_Ref, + FMRB_OnlyReadsMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Ref), // This function does not read from memory anywhere, but may write to any // memory location. // // This property corresponds to the LLVM IR 'writeonly' attribute. // This property corresponds to the IntrWriteMem LLVM intrinsic flag. - FMRB_DoesNotReadMemory = FMRL_Anywhere | MRI_Mod, + FMRB_DoesNotReadMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Mod), /// This indicates that the function could not be classified into one of the /// behaviors above. 
- FMRB_UnknownModRefBehavior = FMRL_Anywhere | MRI_ModRef + FMRB_UnknownModRefBehavior = + FMRL_Anywhere | static_cast<int>(ModRefInfo::ModRef) }; // Wrapper method strips bits significant only in FunctionModRefBehavior, @@ -234,7 +242,7 @@ // entry with all bits set to 1. LLVM_NODISCARD inline ModRefInfo createModRefInfo(const FunctionModRefBehavior FMRB) { - return ModRefInfo(FMRB & MRI_ModRef); + return ModRefInfo(FMRB & static_cast<int>(ModRefInfo::ModRef)); } class AAResults { @@ -593,7 +601,7 @@ case Instruction::CatchRet: return getModRefInfo((const CatchReturnInst *)I, Loc); default: - return MRI_NoModRef; + return ModRefInfo::NoModRef; } } @@ -894,7 +902,7 @@ } ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) { - return MRI_ModRef; + return ModRefInfo::ModRef; } FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) { @@ -906,11 +914,11 @@ } ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) { - return MRI_ModRef; + return ModRefInfo::ModRef; } ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) { - return MRI_ModRef; + return ModRefInfo::ModRef; } }; Index: llvm/trunk/lib/Analysis/AliasAnalysis.cpp =================================================================== --- llvm/trunk/lib/Analysis/AliasAnalysis.cpp +++ llvm/trunk/lib/Analysis/AliasAnalysis.cpp @@ -119,7 +119,7 @@ } ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) { - ModRefInfo Result = MRI_ModRef; + ModRefInfo Result = ModRefInfo::ModRef; for (const auto &AA : AAs) { Result = intersectModRef(Result, AA->getArgModRefInfo(CS, ArgIdx)); @@ -138,8 +138,8 @@ // Check if the two calls modify the same memory return getModRefInfo(CS, Call); } else if (I->isFenceLike()) { - // If this is a fence, just return MRI_ModRef. - return MRI_ModRef; + // If this is a fence, just return ModRef. 
+ return ModRefInfo::ModRef; } else { // Otherwise, check if the call modifies or references the // location this memory access defines. The best we can say @@ -150,12 +150,12 @@ if (isModOrRefSet(MR)) return setModAndRef(MR); } - return MRI_NoModRef; + return ModRefInfo::NoModRef; } ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) { - ModRefInfo Result = MRI_ModRef; + ModRefInfo Result = ModRefInfo::ModRef; for (const auto &AA : AAs) { Result = intersectModRef(Result, AA->getModRefInfo(CS, Loc)); @@ -170,7 +170,7 @@ auto MRB = getModRefBehavior(CS); if (MRB == FMRB_DoesNotAccessMemory || MRB == FMRB_OnlyAccessesInaccessibleMem) - return MRI_NoModRef; + return ModRefInfo::NoModRef; if (onlyReadsMemory(MRB)) Result = clearMod(Result); @@ -179,7 +179,7 @@ if (onlyAccessesArgPointees(MRB) || onlyAccessesInaccessibleOrArgMem(MRB)) { bool DoesAlias = false; - ModRefInfo AllArgsMask = MRI_NoModRef; + ModRefInfo AllArgsMask = ModRefInfo::NoModRef; if (doesAccessArgPointees(MRB)) { for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) { const Value *Arg = *AI; @@ -195,9 +195,9 @@ } } } - // Return MRI_NoModRef if no alias found with any argument. + // Return NoModRef if no alias found with any argument. if (!DoesAlias) - return MRI_NoModRef; + return ModRefInfo::NoModRef; // Logical & between other AA analyses and argument analysis. Result = intersectModRef(Result, AllArgsMask); } @@ -212,7 +212,7 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) { - ModRefInfo Result = MRI_ModRef; + ModRefInfo Result = ModRefInfo::ModRef; for (const auto &AA : AAs) { Result = intersectModRef(Result, AA->getModRefInfo(CS1, CS2)); @@ -228,15 +228,15 @@ // If CS1 or CS2 are readnone, they don't interact. 
auto CS1B = getModRefBehavior(CS1); if (CS1B == FMRB_DoesNotAccessMemory) - return MRI_NoModRef; + return ModRefInfo::NoModRef; auto CS2B = getModRefBehavior(CS2); if (CS2B == FMRB_DoesNotAccessMemory) - return MRI_NoModRef; + return ModRefInfo::NoModRef; // If they both only read from memory, there is no dependence. if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B)) - return MRI_NoModRef; + return ModRefInfo::NoModRef; // If CS1 only reads memory, the only dependence on CS2 can be // from CS1 reading memory written by CS2. @@ -249,7 +249,7 @@ // information from CS1's references to the memory referenced by // CS2's arguments. if (onlyAccessesArgPointees(CS2B)) { - ModRefInfo R = MRI_NoModRef; + ModRefInfo R = ModRefInfo::NoModRef; if (doesAccessArgPointees(CS2B)) { for (auto I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) { const Value *Arg = *I; @@ -263,11 +263,11 @@ // - If CS2 modifies location, dependence exists if CS1 reads or writes. // - If CS2 only reads location, dependence exists if CS1 writes. ModRefInfo ArgModRefCS2 = getArgModRefInfo(CS2, CS2ArgIdx); - ModRefInfo ArgMask = MRI_NoModRef; + ModRefInfo ArgMask = ModRefInfo::NoModRef; if (isModSet(ArgModRefCS2)) - ArgMask = MRI_ModRef; + ArgMask = ModRefInfo::ModRef; else if (isRefSet(ArgModRefCS2)) - ArgMask = MRI_Mod; + ArgMask = ModRefInfo::Mod; // ModRefCS1 indicates what CS1 might do to CS2ArgLoc, and we use // above ArgMask to update dependence info. @@ -285,7 +285,7 @@ // If CS1 only accesses memory through arguments, check if CS2 references // any of the memory referenced by CS1's arguments. If not, return NoModRef. if (onlyAccessesArgPointees(CS1B)) { - ModRefInfo R = MRI_NoModRef; + ModRefInfo R = ModRefInfo::NoModRef; if (doesAccessArgPointees(CS1B)) { for (auto I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) { const Value *Arg = *I; @@ -349,45 +349,45 @@ const MemoryLocation &Loc) { // Be conservative in the face of atomic. 
if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered)) - return MRI_ModRef; + return ModRefInfo::ModRef; // If the load address doesn't alias the given address, it doesn't read // or write the specified memory. if (Loc.Ptr && !alias(MemoryLocation::get(L), Loc)) - return MRI_NoModRef; + return ModRefInfo::NoModRef; // Otherwise, a load just reads. - return MRI_Ref; + return ModRefInfo::Ref; } ModRefInfo AAResults::getModRefInfo(const StoreInst *S, const MemoryLocation &Loc) { // Be conservative in the face of atomic. if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered)) - return MRI_ModRef; + return ModRefInfo::ModRef; if (Loc.Ptr) { // If the store address cannot alias the pointer in question, then the // specified memory cannot be modified by the store. if (!alias(MemoryLocation::get(S), Loc)) - return MRI_NoModRef; + return ModRefInfo::NoModRef; // If the pointer is a pointer to constant memory, then it could not have // been modified by this store. if (pointsToConstantMemory(Loc)) - return MRI_NoModRef; + return ModRefInfo::NoModRef; } // Otherwise, a store just writes. - return MRI_Mod; + return ModRefInfo::Mod; } ModRefInfo AAResults::getModRefInfo(const FenceInst *S, const MemoryLocation &Loc) { // If we know that the location is a constant memory location, the fence // cannot modify this location. if (Loc.Ptr && pointsToConstantMemory(Loc)) - return MRI_Ref; - return MRI_ModRef; + return ModRefInfo::Ref; + return ModRefInfo::ModRef; } ModRefInfo AAResults::getModRefInfo(const VAArgInst *V, @@ -396,16 +396,16 @@ // If the va_arg address cannot alias the pointer in question, then the // specified memory cannot be accessed by the va_arg. if (!alias(MemoryLocation::get(V), Loc)) - return MRI_NoModRef; + return ModRefInfo::NoModRef; // If the pointer is a pointer to constant memory, then it could not have // been modified by this va_arg. 
if (pointsToConstantMemory(Loc)) - return MRI_NoModRef; + return ModRefInfo::NoModRef; } // Otherwise, a va_arg reads and writes. - return MRI_ModRef; + return ModRefInfo::ModRef; } ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad, @@ -414,11 +414,11 @@ // If the pointer is a pointer to constant memory, // then it could not have been modified by this catchpad. if (pointsToConstantMemory(Loc)) - return MRI_NoModRef; + return ModRefInfo::NoModRef; } // Otherwise, a catchpad reads and writes. - return MRI_ModRef; + return ModRefInfo::ModRef; } ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet, @@ -427,37 +427,37 @@ // If the pointer is a pointer to constant memory, // then it could not have been modified by this catchpad. if (pointsToConstantMemory(Loc)) - return MRI_NoModRef; + return ModRefInfo::NoModRef; } // Otherwise, a catchret reads and writes. - return MRI_ModRef; + return ModRefInfo::ModRef; } ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX, const MemoryLocation &Loc) { // Acquire/Release cmpxchg has properties that matter for arbitrary addresses. if (isStrongerThanMonotonic(CX->getSuccessOrdering())) - return MRI_ModRef; + return ModRefInfo::ModRef; // If the cmpxchg address does not alias the location, it does not access it. if (Loc.Ptr && !alias(MemoryLocation::get(CX), Loc)) - return MRI_NoModRef; + return ModRefInfo::NoModRef; - return MRI_ModRef; + return ModRefInfo::ModRef; } ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc) { // Acquire/Release atomicrmw has properties that matter for arbitrary addresses. if (isStrongerThanMonotonic(RMW->getOrdering())) - return MRI_ModRef; + return ModRefInfo::ModRef; // If the atomicrmw address does not alias the location, it does not access it. 
if (Loc.Ptr && !alias(MemoryLocation::get(RMW), Loc)) - return MRI_NoModRef; + return ModRefInfo::NoModRef; - return MRI_ModRef; + return ModRefInfo::ModRef; } /// \brief Return information about whether a particular call site modifies @@ -473,26 +473,26 @@ DominatorTree *DT, OrderedBasicBlock *OBB) { if (!DT) - return MRI_ModRef; + return ModRefInfo::ModRef; const Value *Object = GetUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout()); if (!isIdentifiedObject(Object) || isa(Object) || isa(Object)) - return MRI_ModRef; + return ModRefInfo::ModRef; ImmutableCallSite CS(I); if (!CS.getInstruction() || CS.getInstruction() == Object) - return MRI_ModRef; + return ModRefInfo::ModRef; if (PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true, /* StoreCaptures */ true, I, DT, /* include Object */ true, /* OrderedBasicBlock */ OBB)) - return MRI_ModRef; + return ModRefInfo::ModRef; unsigned ArgNo = 0; - ModRefInfo R = MRI_NoModRef; + ModRefInfo R = ModRefInfo::NoModRef; for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end(); CI != CE; ++CI, ++ArgNo) { // Only look at the no-capture or byval pointer arguments. 
If this @@ -512,10 +512,10 @@ if (CS.doesNotAccessMemory(ArgNo)) continue; if (CS.onlyReadsMemory(ArgNo)) { - R = MRI_Ref; + R = ModRefInfo::Ref; continue; } - return MRI_ModRef; + return ModRefInfo::ModRef; } return R; } @@ -525,7 +525,7 @@ /// bool AAResults::canBasicBlockModify(const BasicBlock &BB, const MemoryLocation &Loc) { - return canInstructionRangeModRef(BB.front(), BB.back(), Loc, MRI_Mod); + return canInstructionRangeModRef(BB.front(), BB.back(), Loc, ModRefInfo::Mod); } /// canInstructionRangeModRef - Return true if it is possible for the Index: llvm/trunk/lib/Analysis/AliasAnalysisEvaluator.cpp =================================================================== --- llvm/trunk/lib/Analysis/AliasAnalysisEvaluator.cpp +++ llvm/trunk/lib/Analysis/AliasAnalysisEvaluator.cpp @@ -244,20 +244,20 @@ if (ElTy->isSized()) Size = DL.getTypeStoreSize(ElTy); switch (AA.getModRefInfo(C, Pointer, Size)) { - case MRI_NoModRef: + case ModRefInfo::NoModRef: PrintModRefResults("NoModRef", PrintNoModRef, I, Pointer, F.getParent()); ++NoModRefCount; break; - case MRI_Mod: + case ModRefInfo::Mod: PrintModRefResults("Just Mod", PrintMod, I, Pointer, F.getParent()); ++ModCount; break; - case MRI_Ref: + case ModRefInfo::Ref: PrintModRefResults("Just Ref", PrintRef, I, Pointer, F.getParent()); ++RefCount; break; - case MRI_ModRef: + case ModRefInfo::ModRef: PrintModRefResults("Both ModRef", PrintModRef, I, Pointer, F.getParent()); ++ModRefCount; @@ -272,19 +272,19 @@ if (D == C) continue; switch (AA.getModRefInfo(*C, *D)) { - case MRI_NoModRef: + case ModRefInfo::NoModRef: PrintModRefResults("NoModRef", PrintNoModRef, *C, *D, F.getParent()); ++NoModRefCount; break; - case MRI_Mod: + case ModRefInfo::Mod: PrintModRefResults("Just Mod", PrintMod, *C, *D, F.getParent()); ++ModCount; break; - case MRI_Ref: + case ModRefInfo::Ref: PrintModRefResults("Just Ref", PrintRef, *C, *D, F.getParent()); ++RefCount; break; - case MRI_ModRef: + case ModRefInfo::ModRef: 
PrintModRefResults("Both ModRef", PrintModRef, *C, *D, F.getParent()); ++ModRefCount; break; Index: llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp =================================================================== --- llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp +++ llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp @@ -687,13 +687,13 @@ unsigned ArgIdx) { // Checking for known builtin intrinsics and target library functions. if (isWriteOnlyParam(CS, ArgIdx, TLI)) - return MRI_Mod; + return ModRefInfo::Mod; if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly)) - return MRI_Ref; + return ModRefInfo::Ref; if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone)) - return MRI_NoModRef; + return ModRefInfo::NoModRef; return AAResultBase::getArgModRefInfo(CS, ArgIdx); } @@ -770,7 +770,7 @@ if (isa(Object)) if (const CallInst *CI = dyn_cast(CS.getInstruction())) if (CI->isTailCall()) - return MRI_NoModRef; + return ModRefInfo::NoModRef; // If the pointer is to a locally allocated object that does not escape, // then the call can not mod/ref the pointer unless the call takes the pointer @@ -780,7 +780,7 @@ // Optimistically assume that call doesn't touch Object and check this // assumption in the following loop. - ModRefInfo Result = MRI_NoModRef; + ModRefInfo Result = ModRefInfo::NoModRef; unsigned OperandNo = 0; for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end(); @@ -818,7 +818,7 @@ continue; } // This operand aliases 'Object' and call reads and writes into it. - Result = MRI_ModRef; + Result = ModRefInfo::ModRef; break; } @@ -838,7 +838,7 @@ // Be conservative if the accessed pointer may alias the allocation - // fallback to the generic handling below. 
if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias) - return MRI_NoModRef; + return ModRefInfo::NoModRef; } // The semantics of memcpy intrinsics forbid overlap between their respective @@ -851,14 +851,14 @@ if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc)) == MustAlias) // Loc is exactly the memcpy source thus disjoint from memcpy dest. - return MRI_Ref; + return ModRefInfo::Ref; if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc)) == MustAlias) // The converse case. - return MRI_Mod; + return ModRefInfo::Mod; // It's also possible for Loc to alias both src and dest, or neither. - ModRefInfo rv = MRI_NoModRef; + ModRefInfo rv = ModRefInfo::NoModRef; if (SrcAA != NoAlias) rv = setRef(rv); if (DestAA != NoAlias) @@ -870,7 +870,7 @@ // proper control dependencies will be maintained, it never aliases any // particular memory location. if (isIntrinsicCall(CS, Intrinsic::assume)) - return MRI_NoModRef; + return ModRefInfo::NoModRef; // Like assumes, guard intrinsics are also marked as arbitrarily writing so // that proper control dependencies are maintained but they never mods any @@ -880,7 +880,7 @@ // heap state at the point the guard is issued needs to be consistent in case // the guard invokes the "deopt" continuation. if (isIntrinsicCall(CS, Intrinsic::experimental_guard)) - return MRI_Ref; + return ModRefInfo::Ref; // Like assumes, invariant.start intrinsics were also marked as arbitrarily // writing so that proper control dependencies are maintained but they never @@ -906,7 +906,7 @@ // rules of invariant.start) and print 40, while the first program always // prints 50. if (isIntrinsicCall(CS, Intrinsic::invariant_start)) - return MRI_Ref; + return ModRefInfo::Ref; // The AAResultBase base class has some smarts, lets use them. return AAResultBase::getModRefInfo(CS, Loc); @@ -919,7 +919,7 @@ // particular memory location. 
if (isIntrinsicCall(CS1, Intrinsic::assume) || isIntrinsicCall(CS2, Intrinsic::assume)) - return MRI_NoModRef; + return ModRefInfo::NoModRef; // Like assumes, guard intrinsics are also marked as arbitrarily writing so // that proper control dependencies are maintained but they never mod any @@ -933,12 +933,14 @@ // possibilities for guard intrinsics. if (isIntrinsicCall(CS1, Intrinsic::experimental_guard)) - return isModSet(createModRefInfo(getModRefBehavior(CS2))) ? MRI_Ref - : MRI_NoModRef; + return isModSet(createModRefInfo(getModRefBehavior(CS2))) + ? ModRefInfo::Ref + : ModRefInfo::NoModRef; if (isIntrinsicCall(CS2, Intrinsic::experimental_guard)) - return isModSet(createModRefInfo(getModRefBehavior(CS1))) ? MRI_Mod - : MRI_NoModRef; + return isModSet(createModRefInfo(getModRefBehavior(CS1))) + ? ModRefInfo::Mod + : ModRefInfo::NoModRef; // The AAResultBase base class has some smarts, lets use them. return AAResultBase::getModRefInfo(CS1, CS2); Index: llvm/trunk/lib/Analysis/GlobalsModRef.cpp =================================================================== --- llvm/trunk/lib/Analysis/GlobalsModRef.cpp +++ llvm/trunk/lib/Analysis/GlobalsModRef.cpp @@ -88,9 +88,9 @@ enum { MayReadAnyGlobal = 4 }; /// Checks to document the invariants of the bit packing here. - static_assert((MayReadAnyGlobal & MRI_ModRef) == 0, + static_assert((MayReadAnyGlobal & static_cast<int>(ModRefInfo::ModRef)) == 0, "ModRef and the MayReadAnyGlobal flag bits overlap."); - static_assert(((MayReadAnyGlobal | MRI_ModRef) >> + static_assert(((MayReadAnyGlobal | static_cast<int>(ModRefInfo::ModRef)) >> AlignedMapPointerTraits::NumLowBitsAvailable) == 0, "Insufficient low bits to store our flag and ModRef info."); @@ -127,12 +127,12 @@ /// Returns the \c ModRefInfo info for this function. ModRefInfo getModRefInfo() const { - return ModRefInfo(Info.getInt() & MRI_ModRef); + return ModRefInfo(Info.getInt() & static_cast<int>(ModRefInfo::ModRef)); } /// Adds new \c ModRefInfo for this function to its state. 
void addModRefInfo(ModRefInfo NewMRI) { - Info.setInt(Info.getInt() | NewMRI); + Info.setInt(Info.getInt() | static_cast<int>(NewMRI)); } /// Returns whether this function may read any global variable, and we don't @@ -145,7 +145,8 @@ /// Returns the \c ModRefInfo info for this function w.r.t. a particular /// global, which may be more precise than the general information above. ModRefInfo getModRefInfoForGlobal(const GlobalValue &GV) const { - ModRefInfo GlobalMRI = mayReadAnyGlobal() ? MRI_Ref : MRI_NoModRef; + ModRefInfo GlobalMRI = + mayReadAnyGlobal() ? ModRefInfo::Ref : ModRefInfo::NoModRef; if (AlignedMap *P = Info.getPointer()) { auto I = P->Map.find(&GV); if (I != P->Map.end()) @@ -155,7 +156,7 @@ } /// Add mod/ref info from another function into ours, saturating towards - /// MRI_ModRef. + /// ModRef. void addFunctionInfo(const FunctionInfo &FI) { addModRefInfo(FI.getModRefInfo()); @@ -298,7 +299,7 @@ Handles.emplace_front(*this, Reader); Handles.front().I = Handles.begin(); } - FunctionInfos[Reader].addModRefInfoForGlobal(GV, MRI_Ref); + FunctionInfos[Reader].addModRefInfoForGlobal(GV, ModRefInfo::Ref); } if (!GV.isConstant()) // No need to keep track of writers to constants @@ -307,7 +308,7 @@ Handles.emplace_front(*this, Writer); Handles.front().I = Handles.begin(); } - FunctionInfos[Writer].addModRefInfoForGlobal(GV, MRI_Mod); + FunctionInfos[Writer].addModRefInfoForGlobal(GV, ModRefInfo::Mod); } ++NumNonAddrTakenGlobalVars; @@ -503,13 +504,13 @@ if (F->doesNotAccessMemory()) { // Can't do better than that! } else if (F->onlyReadsMemory()) { - FI.addModRefInfo(MRI_Ref); + FI.addModRefInfo(ModRefInfo::Ref); if (!F->isIntrinsic() && !F->onlyAccessesArgMemory()) // This function might call back into the module and read a global - // consider every global as possibly being read by this function. 
FI.setMayReadAnyGlobal(); } else { - FI.addModRefInfo(MRI_ModRef); + FI.addModRefInfo(ModRefInfo::ModRef); // Can't say anything useful unless it's an intrinsic - they don't // read or write global variables of the kind considered here. KnowNothing = !F->isIntrinsic(); @@ -564,7 +565,7 @@ if (isAllocationFn(&I, &TLI) || isFreeCall(&I, &TLI)) { // FIXME: It is completely unclear why this is necessary and not // handled by the above graph code. - FI.addModRefInfo(MRI_ModRef); + FI.addModRefInfo(ModRefInfo::ModRef); } else if (Function *Callee = CS.getCalledFunction()) { // The callgraph doesn't include intrinsic calls. if (Callee->isIntrinsic()) { @@ -579,9 +580,9 @@ // All non-call instructions we use the primary predicates for whether // thay read or write memory. if (I.mayReadFromMemory()) - FI.addModRefInfo(MRI_Ref); + FI.addModRefInfo(ModRefInfo::Ref); if (I.mayWriteToMemory()) - FI.addModRefInfo(MRI_Mod); + FI.addModRefInfo(ModRefInfo::Mod); } } @@ -868,8 +869,9 @@ ModRefInfo GlobalsAAResult::getModRefInfoForArgument(ImmutableCallSite CS, const GlobalValue *GV) { if (CS.doesNotAccessMemory()) - return MRI_NoModRef; - ModRefInfo ConservativeResult = CS.onlyReadsMemory() ? MRI_Ref : MRI_ModRef; + return ModRefInfo::NoModRef; + ModRefInfo ConservativeResult = + CS.onlyReadsMemory() ? ModRefInfo::Ref : ModRefInfo::ModRef; // Iterate through all the arguments to the called function. If any argument // is based on GV, return the conservative result. @@ -890,12 +892,12 @@ } // We identified all objects in the argument list, and none of them were GV. - return MRI_NoModRef; + return ModRefInfo::NoModRef; } ModRefInfo GlobalsAAResult::getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) { - ModRefInfo Known = MRI_ModRef; + ModRefInfo Known = ModRefInfo::ModRef; // If we are asking for mod/ref info of a direct call with a pointer to a // global we are tracking, return information if we have it. 
@@ -909,7 +911,7 @@ getModRefInfoForArgument(CS, GV)); if (!isModOrRefSet(Known)) - return MRI_NoModRef; // No need to query other mod/ref analyses + return ModRefInfo::NoModRef; // No need to query other mod/ref analyses return intersectModRef(Known, AAResultBase::getModRefInfo(CS, Loc)); } Index: llvm/trunk/lib/Analysis/MemoryDependenceAnalysis.cpp =================================================================== --- llvm/trunk/lib/Analysis/MemoryDependenceAnalysis.cpp +++ llvm/trunk/lib/Analysis/MemoryDependenceAnalysis.cpp @@ -119,38 +119,38 @@ if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) { if (LI->isUnordered()) { Loc = MemoryLocation::get(LI); - return MRI_Ref; + return ModRefInfo::Ref; } if (LI->getOrdering() == AtomicOrdering::Monotonic) { Loc = MemoryLocation::get(LI); - return MRI_ModRef; + return ModRefInfo::ModRef; } Loc = MemoryLocation(); - return MRI_ModRef; + return ModRefInfo::ModRef; } if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) { if (SI->isUnordered()) { Loc = MemoryLocation::get(SI); - return MRI_Mod; + return ModRefInfo::Mod; } if (SI->getOrdering() == AtomicOrdering::Monotonic) { Loc = MemoryLocation::get(SI); - return MRI_ModRef; + return ModRefInfo::ModRef; } Loc = MemoryLocation(); - return MRI_ModRef; + return ModRefInfo::ModRef; } if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) { Loc = MemoryLocation::get(V); - return MRI_ModRef; + return ModRefInfo::ModRef; } if (const CallInst *CI = isFreeCall(Inst, &TLI)) { // calls to free() deallocate the entire structure Loc = MemoryLocation(CI->getArgOperand(0)); - return MRI_Mod; + return ModRefInfo::Mod; } if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { @@ -166,7 +166,7 @@ cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AAInfo); // These intrinsics don't really modify the memory, but returning Mod // will allow them to be handled conservatively. 
- return MRI_Mod; + return ModRefInfo::Mod; case Intrinsic::invariant_end: II->getAAMetadata(AAInfo); Loc = MemoryLocation( @@ -174,7 +174,7 @@ cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AAInfo); // These intrinsics don't really modify the memory, but returning Mod // will allow them to be handled conservatively. - return MRI_Mod; + return ModRefInfo::Mod; default: break; } @@ -182,10 +182,10 @@ // Otherwise, just do the coarse-grained thing that always works. if (Inst->mayWriteToMemory()) - return MRI_ModRef; + return ModRefInfo::ModRef; if (Inst->mayReadFromMemory()) - return MRI_Ref; - return MRI_NoModRef; + return ModRefInfo::Ref; + return ModRefInfo::NoModRef; } /// Private helper for finding the local dependencies of a call site. @@ -689,12 +689,12 @@ if (isModAndRefSet(MR)) MR = AA.callCapturesBefore(Inst, MemLoc, &DT, &OBB); switch (MR) { - case MRI_NoModRef: + case ModRefInfo::NoModRef: // If the call has no effect on the queried pointer, just ignore it. continue; - case MRI_Mod: + case ModRefInfo::Mod: return MemDepResult::getClobber(Inst); - case MRI_Ref: + case ModRefInfo::Ref: // If the call is known to never store to the pointer, and if this is a // load query, we can safely ignore it (scan past it). if (isLoad) Index: llvm/trunk/lib/Analysis/ObjCARCAliasAnalysis.cpp =================================================================== --- llvm/trunk/lib/Analysis/ObjCARCAliasAnalysis.cpp +++ llvm/trunk/lib/Analysis/ObjCARCAliasAnalysis.cpp @@ -123,7 +123,7 @@ // These functions don't access any memory visible to the compiler. // Note that this doesn't include objc_retainBlock, because it updates // pointers when it copies block data. 
- return MRI_NoModRef; + return ModRefInfo::NoModRef; default: break; } Index: llvm/trunk/lib/Analysis/ScopedNoAliasAA.cpp =================================================================== --- llvm/trunk/lib/Analysis/ScopedNoAliasAA.cpp +++ llvm/trunk/lib/Analysis/ScopedNoAliasAA.cpp @@ -102,12 +102,12 @@ if (!mayAliasInScopes(Loc.AATags.Scope, CS.getInstruction()->getMetadata( LLVMContext::MD_noalias))) - return MRI_NoModRef; + return ModRefInfo::NoModRef; if (!mayAliasInScopes( CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope), Loc.AATags.NoAlias)) - return MRI_NoModRef; + return ModRefInfo::NoModRef; return AAResultBase::getModRefInfo(CS, Loc); } @@ -120,12 +120,12 @@ if (!mayAliasInScopes( CS1.getInstruction()->getMetadata(LLVMContext::MD_alias_scope), CS2.getInstruction()->getMetadata(LLVMContext::MD_noalias))) - return MRI_NoModRef; + return ModRefInfo::NoModRef; if (!mayAliasInScopes( CS2.getInstruction()->getMetadata(LLVMContext::MD_alias_scope), CS1.getInstruction()->getMetadata(LLVMContext::MD_noalias))) - return MRI_NoModRef; + return ModRefInfo::NoModRef; return AAResultBase::getModRefInfo(CS1, CS2); } Index: llvm/trunk/lib/Analysis/TypeBasedAliasAnalysis.cpp =================================================================== --- llvm/trunk/lib/Analysis/TypeBasedAliasAnalysis.cpp +++ llvm/trunk/lib/Analysis/TypeBasedAliasAnalysis.cpp @@ -371,7 +371,7 @@ if (const MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa)) if (!Aliases(L, M)) - return MRI_NoModRef; + return ModRefInfo::NoModRef; return AAResultBase::getModRefInfo(CS, Loc); } @@ -386,7 +386,7 @@ if (const MDNode *M2 = CS2.getInstruction()->getMetadata(LLVMContext::MD_tbaa)) if (!Aliases(M1, M2)) - return MRI_NoModRef; + return ModRefInfo::NoModRef; return AAResultBase::getModRefInfo(CS1, CS2); } Index: llvm/trunk/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp =================================================================== --- 
llvm/trunk/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp @@ -2009,12 +2009,12 @@ SmallPtrSet Ignore1; Ignore1.insert(SI); - if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount, + if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount, StoreSize, *AA, Ignore1)) { // Check if the load is the offending instruction. Ignore1.insert(LI); - if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount, - StoreSize, *AA, Ignore1)) { + if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, + BECount, StoreSize, *AA, Ignore1)) { // Still bad. Nothing we can do. goto CleanupAndExit; } @@ -2056,8 +2056,8 @@ SmallPtrSet Ignore2; Ignore2.insert(SI); - if (mayLoopAccessLocation(LoadBasePtr, MRI_Mod, CurLoop, BECount, StoreSize, - *AA, Ignore2)) + if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount, + StoreSize, *AA, Ignore2)) goto CleanupAndExit; // Check the stride. Index: llvm/trunk/lib/Transforms/IPO/ArgumentPromotion.cpp =================================================================== --- llvm/trunk/lib/Transforms/IPO/ArgumentPromotion.cpp +++ llvm/trunk/lib/Transforms/IPO/ArgumentPromotion.cpp @@ -719,7 +719,7 @@ BasicBlock *BB = Load->getParent(); MemoryLocation Loc = MemoryLocation::get(Load); - if (AAR.canInstructionRangeModRef(BB->front(), *Load, Loc, MRI_Mod)) + if (AAR.canInstructionRangeModRef(BB->front(), *Load, Loc, ModRefInfo::Mod)) return false; // Pointer is invalidated! // Now check every path from the entry block to the load for transparency. Index: llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp =================================================================== --- llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp +++ llvm/trunk/lib/Transforms/Scalar/LoopIdiomRecognize.cpp @@ -887,8 +887,8 @@ // base pointer and checking the region. 
Value *BasePtr = Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator()); - if (mayLoopAccessLocation(BasePtr, MRI_ModRef, CurLoop, BECount, StoreSize, - *AA, Stores)) { + if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount, + StoreSize, *AA, Stores)) { Expander.clear(); // If we generated new code for the base pointer, clean up. RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI); @@ -997,7 +997,7 @@ SmallPtrSet Stores; Stores.insert(SI); - if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount, + if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount, StoreSize, *AA, Stores)) { Expander.clear(); // If we generated new code for the base pointer, clean up. @@ -1017,8 +1017,8 @@ Value *LoadBasePtr = Expander.expandCodeFor( LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator()); - if (mayLoopAccessLocation(LoadBasePtr, MRI_Mod, CurLoop, BECount, StoreSize, - *AA, Stores)) { + if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount, + StoreSize, *AA, Stores)) { Expander.clear(); // If we generated new code for the base pointer, clean up. 
RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI); Index: llvm/trunk/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp =================================================================== --- llvm/trunk/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp +++ llvm/trunk/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp @@ -195,7 +195,7 @@ make_range(Start.getIterator(), End.getIterator())) if (Inst.mayThrow()) return true; - return AA->canInstructionRangeModRef(Start, End, Loc, MRI_ModRef); + return AA->canInstructionRangeModRef(Start, End, Loc, ModRefInfo::ModRef); } /// Index: llvm/trunk/unittests/Analysis/AliasAnalysisTest.cpp =================================================================== --- llvm/trunk/unittests/Analysis/AliasAnalysisTest.cpp +++ llvm/trunk/unittests/Analysis/AliasAnalysisTest.cpp @@ -191,18 +191,18 @@ auto &AA = getAAResults(*F); // Check basic results - EXPECT_EQ(AA.getModRefInfo(Store1, MemoryLocation()), MRI_Mod); - EXPECT_EQ(AA.getModRefInfo(Store1, None), MRI_Mod); - EXPECT_EQ(AA.getModRefInfo(Load1, MemoryLocation()), MRI_Ref); - EXPECT_EQ(AA.getModRefInfo(Load1, None), MRI_Ref); - EXPECT_EQ(AA.getModRefInfo(Add1, MemoryLocation()), MRI_NoModRef); - EXPECT_EQ(AA.getModRefInfo(Add1, None), MRI_NoModRef); - EXPECT_EQ(AA.getModRefInfo(VAArg1, MemoryLocation()), MRI_ModRef); - EXPECT_EQ(AA.getModRefInfo(VAArg1, None), MRI_ModRef); - EXPECT_EQ(AA.getModRefInfo(CmpXChg1, MemoryLocation()), MRI_ModRef); - EXPECT_EQ(AA.getModRefInfo(CmpXChg1, None), MRI_ModRef); - EXPECT_EQ(AA.getModRefInfo(AtomicRMW, MemoryLocation()), MRI_ModRef); - EXPECT_EQ(AA.getModRefInfo(AtomicRMW, None), MRI_ModRef); + EXPECT_EQ(AA.getModRefInfo(Store1, MemoryLocation()), ModRefInfo::Mod); + EXPECT_EQ(AA.getModRefInfo(Store1, None), ModRefInfo::Mod); + EXPECT_EQ(AA.getModRefInfo(Load1, MemoryLocation()), ModRefInfo::Ref); + EXPECT_EQ(AA.getModRefInfo(Load1, None), ModRefInfo::Ref); + EXPECT_EQ(AA.getModRefInfo(Add1, MemoryLocation()), ModRefInfo::NoModRef); + 
EXPECT_EQ(AA.getModRefInfo(Add1, None), ModRefInfo::NoModRef); + EXPECT_EQ(AA.getModRefInfo(VAArg1, MemoryLocation()), ModRefInfo::ModRef); + EXPECT_EQ(AA.getModRefInfo(VAArg1, None), ModRefInfo::ModRef); + EXPECT_EQ(AA.getModRefInfo(CmpXChg1, MemoryLocation()), ModRefInfo::ModRef); + EXPECT_EQ(AA.getModRefInfo(CmpXChg1, None), ModRefInfo::ModRef); + EXPECT_EQ(AA.getModRefInfo(AtomicRMW, MemoryLocation()), ModRefInfo::ModRef); + EXPECT_EQ(AA.getModRefInfo(AtomicRMW, None), ModRefInfo::ModRef); } class AAPassInfraTest : public testing::Test {