Index: include/llvm/Analysis/AliasAnalysis.h
===================================================================
--- include/llvm/Analysis/AliasAnalysis.h
+++ include/llvm/Analysis/AliasAnalysis.h
@@ -325,8 +325,8 @@
   AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);

   /// A convenience wrapper around the primary \c alias interface.
-  AliasResult alias(const Value *V1, uint64_t V1Size, const Value *V2,
-                    uint64_t V2Size) {
+  AliasResult alias(const Value *V1, LocationSize V1Size, const Value *V2,
+                    LocationSize V2Size) {
     return alias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
   }

@@ -343,8 +343,8 @@
   }

   /// A convenience wrapper around the \c isNoAlias helper interface.
-  bool isNoAlias(const Value *V1, uint64_t V1Size, const Value *V2,
-                 uint64_t V2Size) {
+  bool isNoAlias(const Value *V1, LocationSize V1Size, const Value *V2,
+                 LocationSize V2Size) {
     return isNoAlias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
   }

@@ -501,7 +501,7 @@

   /// getModRefInfo (for call sites) - A convenience wrapper.
   ModRefInfo getModRefInfo(ImmutableCallSite CS, const Value *P,
-                           uint64_t Size) {
+                           LocationSize Size) {
     return getModRefInfo(CS, MemoryLocation(P, Size));
   }

@@ -512,7 +512,8 @@
   }

   /// getModRefInfo (for calls) - A convenience wrapper.
-  ModRefInfo getModRefInfo(const CallInst *C, const Value *P, uint64_t Size) {
+  ModRefInfo getModRefInfo(const CallInst *C, const Value *P,
+                           LocationSize Size) {
     return getModRefInfo(C, MemoryLocation(P, Size));
   }

@@ -523,7 +524,8 @@
   }

   /// getModRefInfo (for invokes) - A convenience wrapper.
-  ModRefInfo getModRefInfo(const InvokeInst *I, const Value *P, uint64_t Size) {
+  ModRefInfo getModRefInfo(const InvokeInst *I, const Value *P,
+                           LocationSize Size) {
     return getModRefInfo(I, MemoryLocation(P, Size));
   }

@@ -532,7 +534,8 @@
   ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc);

   /// getModRefInfo (for loads) - A convenience wrapper.
-  ModRefInfo getModRefInfo(const LoadInst *L, const Value *P, uint64_t Size) {
+  ModRefInfo getModRefInfo(const LoadInst *L, const Value *P,
+                           LocationSize Size) {
     return getModRefInfo(L, MemoryLocation(P, Size));
   }

@@ -541,7 +544,8 @@
   ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc);

   /// getModRefInfo (for stores) - A convenience wrapper.
-  ModRefInfo getModRefInfo(const StoreInst *S, const Value *P, uint64_t Size) {
+  ModRefInfo getModRefInfo(const StoreInst *S, const Value *P,
+                           LocationSize Size) {
     return getModRefInfo(S, MemoryLocation(P, Size));
   }

@@ -550,7 +554,8 @@
   ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc);

   /// getModRefInfo (for fences) - A convenience wrapper.
-  ModRefInfo getModRefInfo(const FenceInst *S, const Value *P, uint64_t Size) {
+  ModRefInfo getModRefInfo(const FenceInst *S, const Value *P,
+                           LocationSize Size) {
     return getModRefInfo(S, MemoryLocation(P, Size));
   }

@@ -580,7 +585,8 @@
   ModRefInfo getModRefInfo(const VAArgInst *I, const MemoryLocation &Loc);

   /// getModRefInfo (for va_args) - A convenience wrapper.
-  ModRefInfo getModRefInfo(const VAArgInst *I, const Value *P, uint64_t Size) {
+  ModRefInfo getModRefInfo(const VAArgInst *I, const Value *P,
+                           LocationSize Size) {
     return getModRefInfo(I, MemoryLocation(P, Size));
   }

@@ -590,7 +596,7 @@

   /// getModRefInfo (for catchpads) - A convenience wrapper.
   ModRefInfo getModRefInfo(const CatchPadInst *I, const Value *P,
-                           uint64_t Size) {
+                           LocationSize Size) {
     return getModRefInfo(I, MemoryLocation(P, Size));
   }

@@ -600,7 +606,7 @@

   /// getModRefInfo (for catchrets) - A convenience wrapper.
   ModRefInfo getModRefInfo(const CatchReturnInst *I, const Value *P,
-                           uint64_t Size) {
+                           LocationSize Size) {
     return getModRefInfo(I, MemoryLocation(P, Size));
   }

@@ -646,7 +652,7 @@

   /// A convenience wrapper for constructing the memory location.
   ModRefInfo getModRefInfo(const Instruction *I, const Value *P,
-                           uint64_t Size) {
+                           LocationSize Size) {
     return getModRefInfo(I, MemoryLocation(P, Size));
   }

@@ -671,7 +677,7 @@

   /// \brief A convenience wrapper to synthesize a memory location.
   ModRefInfo callCapturesBefore(const Instruction *I, const Value *P,
-                                uint64_t Size, DominatorTree *DT,
+                                LocationSize Size, DominatorTree *DT,
                                 OrderedBasicBlock *OBB = nullptr) {
     return callCapturesBefore(I, MemoryLocation(P, Size), DT, OBB);
   }

@@ -687,7 +693,7 @@

   /// A convenience wrapper synthesizing a memory location.
   bool canBasicBlockModify(const BasicBlock &BB, const Value *P,
-                           uint64_t Size) {
+                           LocationSize Size) {
     return canBasicBlockModify(BB, MemoryLocation(P, Size));
   }

@@ -702,7 +708,7 @@

   /// A convenience wrapper synthesizing a memory location.
   bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
-                                 const Value *Ptr, uint64_t Size,
+                                 const Value *Ptr, LocationSize Size,
                                  const ModRefInfo Mode) {
     return canInstructionRangeModRef(I1, I2, MemoryLocation(Ptr, Size), Mode);
   }
Index: include/llvm/Analysis/AliasSetTracker.h
===================================================================
--- include/llvm/Analysis/AliasSetTracker.h
+++ include/llvm/Analysis/AliasSetTracker.h
@@ -52,9 +52,13 @@
     PointerRec **PrevInList = nullptr;
     PointerRec *NextInList = nullptr;
     AliasSet *AS = nullptr;
-    uint64_t Size = 0;
+    LocationSize Size = LocationSize::mapEmpty();
     AAMDNodes AAInfo;

+    // Whether the size for this record has been set at all. This makes no
+    // guarantees about the size being known.
+    bool isSizeSet() const { return Size != LocationSize::mapEmpty(); }
+
   public:
     PointerRec(Value *V)
       : Val(V), AAInfo(DenseMapInfo<AAMDNodes>::getEmptyKey()) {}
@@ -69,11 +73,12 @@
       return &NextInList;
     }

-    bool updateSizeAndAAInfo(uint64_t NewSize, const AAMDNodes &NewAAInfo) {
+    bool updateSizeAndAAInfo(LocationSize NewSize, const AAMDNodes &NewAAInfo) {
       bool SizeChanged = false;
-      if (NewSize > Size) {
-        Size = NewSize;
-        SizeChanged = true;
+      if (NewSize != Size) {
+        LocationSize OldSize = Size;
+        Size = isSizeSet() ? Size.combineWith(NewSize) : NewSize;
+        SizeChanged = OldSize != Size;
       }

       if (AAInfo == DenseMapInfo<AAMDNodes>::getEmptyKey())
@@ -91,7 +96,10 @@
       return SizeChanged;
     }

-    uint64_t getSize() const { return Size; }
+    LocationSize getSize() const {
+      assert(isSizeSet() && "Getting an unset size!");
+      return Size;
+    }

     /// Return the AAInfo, or null if there is no information or conflicting
     /// information.
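
A quick illustration of the new merge rule in updateSizeAndAAInfo above: sizes
no longer grow monotonically via max(); once a size has been set, merges go
through LocationSize::combineWith (defined in MemoryLocation.h below), which
degrades to unknown when either side is unknown. A minimal standalone sketch of
the rule, not part of the patch:

  #include "llvm/Analysis/MemoryLocation.h"
  using namespace llvm;

  // Sketch of PointerRec's size-update rule: before any size is set, take
  // the incoming size as-is; afterwards, combine with it (max of two known
  // sizes, unknown if either side is unknown).
  LocationSize mergeRecordSize(LocationSize Old, bool IsSet, LocationSize New) {
    return IsSet ? Old.combineWith(New) : New;
  }
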
@@ -247,7 +255,7 @@
     value_type *operator->() const { return &operator*(); }

     Value *getPointer() const { return CurNode->getValue(); }
-    uint64_t getSize() const { return CurNode->getSize(); }
+    LocationSize getSize() const { return CurNode->getSize(); }
     AAMDNodes getAAInfo() const { return CurNode->getAAInfo(); }

     iterator& operator++() {                // Preincrement
@@ -287,7 +295,7 @@

   void removeFromTracker(AliasSetTracker &AST);

-  void addPointer(AliasSetTracker &AST, PointerRec &Entry, uint64_t Size,
+  void addPointer(AliasSetTracker &AST, PointerRec &Entry, LocationSize Size,
                   const AAMDNodes &AAInfo, bool KnownMustAlias = false);
   void addUnknownInst(Instruction *I, AliasAnalysis &AA);

@@ -309,8 +317,8 @@
 public:
   /// Return true if the specified pointer "may" (or must) alias one of the
   /// members in the set.
-  bool aliasesPointer(const Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo,
-                      AliasAnalysis &AA) const;
+  bool aliasesPointer(const Value *Ptr, LocationSize Size,
+                      const AAMDNodes &AAInfo, AliasAnalysis &AA) const;
   bool aliasesUnknownInst(const Instruction *Inst, AliasAnalysis &AA) const;
 };

@@ -364,7 +372,7 @@
   /// These methods return true if inserting the instruction resulted in the
   /// addition of a new alias set (i.e., the pointer did not alias anything).
   ///
-  void add(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo); // Add a loc.
+  void add(Value *Ptr, LocationSize Size, const AAMDNodes &AAInfo); // Add a loc
   void add(LoadInst *LI);
   void add(StoreInst *SI);
   void add(VAArgInst *VAAI);
@@ -384,12 +392,12 @@
   /// argument is non-null, this method sets the value to true if a new alias
   /// set is created to contain the pointer (because the pointer didn't alias
   /// anything).
-  AliasSet &getAliasSetForPointer(Value *P, uint64_t Size,
+  AliasSet &getAliasSetForPointer(Value *P, LocationSize Size,
                                   const AAMDNodes &AAInfo);

   /// Return the alias set containing the location specified if one exists,
   /// otherwise return null.
-  AliasSet *getAliasSetForPointerIfExists(const Value *P, uint64_t Size,
+  AliasSet *getAliasSetForPointerIfExists(const Value *P, LocationSize Size,
                                           const AAMDNodes &AAInfo) {
     return mergeAliasSetsForPointer(P, Size, AAInfo);
   }
@@ -446,9 +454,9 @@
     return *Entry;
   }

-  AliasSet &addPointer(Value *P, uint64_t Size, const AAMDNodes &AAInfo,
+  AliasSet &addPointer(Value *P, LocationSize Size, const AAMDNodes &AAInfo,
                        AliasSet::AccessLattice E);
-  AliasSet *mergeAliasSetsForPointer(const Value *Ptr, uint64_t Size,
+  AliasSet *mergeAliasSetsForPointer(const Value *Ptr, LocationSize Size,
                                      const AAMDNodes &AAInfo);

   /// Merge all alias sets into a single set that is considered to alias any
Index: include/llvm/Analysis/BasicAliasAnalysis.h
===================================================================
--- include/llvm/Analysis/BasicAliasAnalysis.h
+++ include/llvm/Analysis/BasicAliasAnalysis.h
@@ -171,7 +171,7 @@
   static bool isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
                                         const DecomposedGEP &DecompGEP,
                                         const DecomposedGEP &DecompObject,
-                                        uint64_t ObjectAccessSize);
+                                        LocationSize ObjectAccessSize);

   /// \brief A Heuristic for aliasGEP that searches for a constant offset
   /// between the variables.
@@ -183,31 +183,33 @@
   /// the addition overflows.
   bool constantOffsetHeuristic(const SmallVectorImpl<VariableGEPIndex> &VarIndices,
-                               uint64_t V1Size, uint64_t V2Size, int64_t BaseOffset,
-                               AssumptionCache *AC, DominatorTree *DT);
+                               LocationSize V1Size, LocationSize V2Size,
+                               int64_t BaseOffset, AssumptionCache *AC,
+                               DominatorTree *DT);

   bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2);

   void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
                           const SmallVectorImpl<VariableGEPIndex> &Src);

-  AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
+  AliasResult aliasGEP(const GEPOperator *V1, LocationSize V1Size,
                        const AAMDNodes &V1AAInfo, const Value *V2,
-                       uint64_t V2Size, const AAMDNodes &V2AAInfo,
+                       LocationSize V2Size, const AAMDNodes &V2AAInfo,
                        const Value *UnderlyingV1, const Value *UnderlyingV2);

-  AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
+  AliasResult aliasPHI(const PHINode *PN, LocationSize PNSize,
                        const AAMDNodes &PNAAInfo, const Value *V2,
-                       uint64_t V2Size, const AAMDNodes &V2AAInfo,
+                       LocationSize V2Size, const AAMDNodes &V2AAInfo,
                        const Value *UnderV2);

-  AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
+  AliasResult aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const AAMDNodes &SIAAInfo, const Value *V2,
-                          uint64_t V2Size, const AAMDNodes &V2AAInfo,
+                          LocationSize V2Size, const AAMDNodes &V2AAInfo,
                           const Value *UnderV2);

-  AliasResult aliasCheck(const Value *V1, uint64_t V1Size, AAMDNodes V1AATag,
-                         const Value *V2, uint64_t V2Size, AAMDNodes V2AATag,
+  AliasResult aliasCheck(const Value *V1, LocationSize V1Size,
+                         AAMDNodes V1AATag, const Value *V2,
+                         LocationSize V2Size, AAMDNodes V2AATag,
                          const Value *O1 = nullptr, const Value *O2 = nullptr);
 };
Index: include/llvm/Analysis/MemoryDependenceAnalysis.h
===================================================================
--- include/llvm/Analysis/MemoryDependenceAnalysis.h
+++ include/llvm/Analysis/MemoryDependenceAnalysis.h
@@ -302,7 +302,7 @@
     /// The maximum size of the dereferences of the pointer.
     ///
     /// May be UnknownSize if the sizes are unknown.
-    uint64_t Size = MemoryLocation::UnknownSize;
+    LocationSize Size = LocationSize::unknown();

     /// The AA tags associated with dereferences of the pointer.
     ///
     /// The members may be null if there are no tags or conflicting tags.
Index: include/llvm/Analysis/MemoryLocation.h
===================================================================
--- include/llvm/Analysis/MemoryLocation.h
+++ include/llvm/Analysis/MemoryLocation.h
@@ -29,6 +29,84 @@
 class MemIntrinsic;
 class TargetLibraryInfo;

+// Represents the size of a MemoryLocation. Logically, it's an
+// Optional<uint64_t> that fits into 64 bits. If asked to represent a
+// pathologically large value, this will degrade to None.
+//
+// The intent is for this to also hold a bit for whether the LocationSize is
+// precise in the very near future, so some of the code may seem a bit
+// pointless in itself. Please see https://reviews.llvm.org/D44748 for context.
+class LocationSize {
+  enum : uint64_t {
+    Unknown = ~uint64_t(0),
+    MapEmpty = Unknown - 1,
+    MapTombstone = Unknown - 2,
+
+    MaxValue = (MapTombstone - 1),
+  };
+
+  uint64_t Value;
+
+  // Hack to support implicit construction. This should disappear when the
+  // public LocationSize ctor goes away.
+  enum DirectConstruction { Direct };
+
+  constexpr LocationSize(uint64_t Raw, DirectConstruction) : Value(Raw) {}
+
+public:
+  constexpr LocationSize(uint64_t Raw)
+      : Value(Raw > MaxValue ? Unknown : Raw) {}
+
+  constexpr static LocationSize unknown() {
+    return LocationSize(Unknown, Direct);
+  }
+
+  // Returns a LocationSize that can sanely represent either `this` or `Other`.
+  LocationSize combineWith(LocationSize Other) const {
+    if (!hasValue() || !Other.hasValue())
+      return unknown();
+    return LocationSize(std::max(getValue(), Other.getValue()), Direct);
+  }
+
+  bool hasValue() const { return Value != Unknown; }
+  uint64_t getValue() const {
+    assert(hasValue() && "Getting value from an unknown LocationSize!");
+    return Value;
+  }
+
+  bool isZero() const { return hasValue() && getValue() == 0; }
+
+  bool operator==(const LocationSize &Other) const {
+    return Value == Other.Value;
+  }
+
+  bool operator!=(const LocationSize &Other) const {
+    return !operator==(Other);
+  }
+
+  // Ordering operators are not provided, since it's unclear if there's only
+  // one reasonable way to compare:
+  // - values that don't exist against values that do, and
+  // - precise values to imprecise values.
+
+  // Sentinel values, generally used for maps.
+  constexpr static LocationSize mapTombstone() {
+    return LocationSize(MapTombstone, Direct);
+  }
+  constexpr static LocationSize mapEmpty() {
+    return LocationSize(MapEmpty, Direct);
+  }
+
+  // Returns an opaque value that represents this LocationSize.
+  uint64_t toRaw() const { return Value; }
+
+  void print(raw_ostream &OS) const;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, LocationSize LS) {
+  LS.print(OS);
+  return OS;
+}
+
 /// Representation for a specific memory location.
 ///
 /// This abstraction can be used to represent a specific location in memory.
@@ -55,7 +133,7 @@
   /// virtual address space, because there are restrictions on stepping out of
   /// one object and into another. See
   /// http://llvm.org/docs/LangRef.html#pointeraliasing
-  uint64_t Size;
+  LocationSize Size;

   /// The metadata nodes which describes the aliasing of the location (each
   /// member is null if that kind of information is unavailable).
@@ -100,7 +178,7 @@
                                         const TargetLibraryInfo &TLI);

   explicit MemoryLocation(const Value *Ptr = nullptr,
-                          uint64_t Size = UnknownSize,
+                          LocationSize Size = LocationSize::unknown(),
                           const AAMDNodes &AATags = AAMDNodes())
       : Ptr(Ptr), Size(Size), AATags(AATags) {}

@@ -110,7 +188,7 @@
     return Copy;
   }

-  MemoryLocation getWithNewSize(uint64_t NewSize) const {
+  MemoryLocation getWithNewSize(LocationSize NewSize) const {
     MemoryLocation Copy(*this);
     Copy.Size = NewSize;
     return Copy;
@@ -127,17 +205,34 @@
   }
 };
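
To make the semantics of the class above concrete, a small usage sketch
(assuming only the API defined in this hunk; not part of the patch):

  #include "llvm/Analysis/MemoryLocation.h"
  #include <cassert>
  #include <cstdint>
  using namespace llvm;

  void locationSizeExamples() {
    LocationSize Four(4); // a known size; the uint64_t ctor is implicit
    LocationSize Unk = LocationSize::unknown();

    assert(Four.hasValue() && Four.getValue() == 4);
    assert(!Unk.hasValue());

    // combineWith returns a size that can represent either operand:
    assert(Four.combineWith(LocationSize(8)).getValue() == 8);
    assert(!Four.combineWith(Unk).hasValue()); // unknown absorbs anything

    // Pathologically large values (those colliding with the sentinels)
    // degrade to unknown at construction time.
    assert(!LocationSize(~uint64_t(0) - 1).hasValue());
  }
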
-// Specialize DenseMapInfo for MemoryLocation.
+// Specialize DenseMapInfo.
+template <> struct DenseMapInfo<LocationSize> {
+  static inline LocationSize getEmptyKey() {
+    return LocationSize::mapEmpty();
+  }
+  static inline LocationSize getTombstoneKey() {
+    return LocationSize::mapTombstone();
+  }
+  static unsigned getHashValue(const LocationSize &Val) {
+    return DenseMapInfo<uint64_t>::getHashValue(Val.toRaw());
+  }
+  static bool isEqual(const LocationSize &LHS, const LocationSize &RHS) {
+    return LHS == RHS;
+  }
+};
+
 template <> struct DenseMapInfo<MemoryLocation> {
   static inline MemoryLocation getEmptyKey() {
-    return MemoryLocation(DenseMapInfo<const Value *>::getEmptyKey(), 0);
+    return MemoryLocation(DenseMapInfo<const Value *>::getEmptyKey(),
+                          DenseMapInfo<LocationSize>::getEmptyKey());
   }
   static inline MemoryLocation getTombstoneKey() {
-    return MemoryLocation(DenseMapInfo<const Value *>::getTombstoneKey(), 0);
+    return MemoryLocation(DenseMapInfo<const Value *>::getTombstoneKey(),
+                          DenseMapInfo<LocationSize>::getTombstoneKey());
   }
   static unsigned getHashValue(const MemoryLocation &Val) {
     return DenseMapInfo<const Value *>::getHashValue(Val.Ptr) ^
-           DenseMapInfo<uint64_t>::getHashValue(Val.Size) ^
+           DenseMapInfo<LocationSize>::getHashValue(Val.Size) ^
            DenseMapInfo<AAMDNodes>::getHashValue(Val.AATags);
   }
   static bool isEqual(const MemoryLocation &LHS, const MemoryLocation &RHS) {
Index: lib/Analysis/AliasSetTracker.cpp
===================================================================
--- lib/Analysis/AliasSetTracker.cpp
+++ lib/Analysis/AliasSetTracker.cpp
@@ -126,7 +126,7 @@
 }

 void AliasSet::addPointer(AliasSetTracker &AST, PointerRec &Entry,
-                          uint64_t Size, const AAMDNodes &AAInfo,
+                          LocationSize Size, const AAMDNodes &AAInfo,
                           bool KnownMustAlias) {
   assert(!Entry.hasAliasSet() && "Entry already in set!");

@@ -182,7 +182,7 @@
 /// aliasesPointer - Return true if the specified pointer "may" (or must)
 /// alias one of the members in the set.
 ///
-bool AliasSet::aliasesPointer(const Value *Ptr, uint64_t Size,
+bool AliasSet::aliasesPointer(const Value *Ptr, LocationSize Size,
                               const AAMDNodes &AAInfo,
                               AliasAnalysis &AA) const {
   if (AliasAny)
@@ -262,7 +262,7 @@
 /// alias the pointer. Return the unified set, or nullptr if no set that aliases
 /// the pointer was found.
 AliasSet *AliasSetTracker::mergeAliasSetsForPointer(const Value *Ptr,
-                                                    uint64_t Size,
+                                                    LocationSize Size,
                                                     const AAMDNodes &AAInfo) {
   AliasSet *FoundSet = nullptr;
   for (iterator I = begin(), E = end(); I != E;) {
@@ -302,7 +302,8 @@

 /// getAliasSetForPointer - Return the alias set that the specified pointer
 /// lives in.
-AliasSet &AliasSetTracker::getAliasSetForPointer(Value *Pointer, uint64_t Size,
+AliasSet &AliasSetTracker::getAliasSetForPointer(Value *Pointer,
+                                                 LocationSize Size,
                                                  const AAMDNodes &AAInfo) {
   AliasSet::PointerRec &Entry = getEntryFor(Pointer);

@@ -347,7 +348,8 @@
   return AliasSets.back();
 }

-void AliasSetTracker::add(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo) {
+void AliasSetTracker::add(Value *Ptr, LocationSize Size,
+                          const AAMDNodes &AAInfo) {
   addPointer(Ptr, Size, AAInfo, AliasSet::NoAccess);
 }

@@ -588,7 +590,7 @@
   return *AliasAnyAS;
 }

-AliasSet &AliasSetTracker::addPointer(Value *P, uint64_t Size,
+AliasSet &AliasSetTracker::addPointer(Value *P, LocationSize Size,
                                       const AAMDNodes &AAInfo,
                                       AliasSet::AccessLattice E) {
   AliasSet &AS = getAliasSetForPointer(P, Size, AAInfo);
Index: lib/Analysis/BasicAliasAnalysis.cpp
===================================================================
--- lib/Analysis/BasicAliasAnalysis.cpp
+++ lib/Analysis/BasicAliasAnalysis.cpp
@@ -981,9 +981,9 @@

 /// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
 /// both having the exact same pointer operand.
 static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
-                                            uint64_t V1Size,
+                                            LocationSize MaybeV1Size,
                                             const GEPOperator *GEP2,
-                                            uint64_t V2Size,
+                                            LocationSize MaybeV2Size,
                                             const DataLayout &DL) {
   assert(GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
              GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
@@ -999,10 +999,13 @@

   // If we don't know the size of the accesses through both GEPs, we can't
   // determine whether the struct fields accessed can't alias.
-  if (V1Size == MemoryLocation::UnknownSize ||
-      V2Size == MemoryLocation::UnknownSize)
+  if (MaybeV1Size == MemoryLocation::UnknownSize ||
+      MaybeV2Size == MemoryLocation::UnknownSize)
     return MayAlias;

+  uint64_t V1Size = MaybeV1Size.getValue();
+  uint64_t V2Size = MaybeV2Size.getValue();
+
   ConstantInt *C1 =
       dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
   ConstantInt *C2 =
@@ -1158,8 +1161,8 @@
 // the highest %f1 can be is (%alloca + 3). This means %random can not be higher
 // than (%alloca - 1), and so is not inbounds, a contradiction.
 bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
-      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
-      uint64_t ObjectAccessSize) {
+      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
+      LocationSize ObjectAccessSize) {
   // If the object access size is unknown, or the GEP isn't inbounds, bail.
   if (ObjectAccessSize == MemoryLocation::UnknownSize || !GEPOp->isInBounds())
     return false;
@@ -1184,7 +1187,8 @@
   if (DecompGEP.VarIndices.empty())
     GEPBaseOffset += DecompGEP.OtherOffset;

-  return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
+  return GEPBaseOffset >=
+         ObjectBaseOffset + (int64_t)ObjectAccessSize.getValue();
 }

 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
@@ -1193,11 +1197,11 @@
 /// We know that V1 is a GEP, but we don't know anything about V2.
 /// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
 /// V2.
-AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
-                                    const AAMDNodes &V1AAInfo, const Value *V2,
-                                    uint64_t V2Size, const AAMDNodes &V2AAInfo,
-                                    const Value *UnderlyingV1,
-                                    const Value *UnderlyingV2) {
+AliasResult
+BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size,
+                        const AAMDNodes &V1AAInfo, const Value *V2,
+                        LocationSize V2Size, const AAMDNodes &V2AAInfo,
+                        const Value *UnderlyingV1, const Value *UnderlyingV2) {
   DecomposedGEP DecompGEP1, DecompGEP2;
   bool GEP1MaxLookupReached =
     DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
@@ -1326,7 +1330,7 @@
   if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
     if (GEP1BaseOffset >= 0) {
       if (V2Size != MemoryLocation::UnknownSize) {
-        if ((uint64_t)GEP1BaseOffset < V2Size)
+        if ((uint64_t)GEP1BaseOffset < V2Size.getValue())
           return PartialAlias;
         return NoAlias;
       }
@@ -1341,7 +1345,7 @@
       // stripped a gep with negative index ('gep <ptr>, -1, ...).
       if (V1Size != MemoryLocation::UnknownSize &&
           V2Size != MemoryLocation::UnknownSize) {
-        if (-(uint64_t)GEP1BaseOffset < V1Size)
+        if (-(uint64_t)GEP1BaseOffset < V1Size.getValue())
           return PartialAlias;
         return NoAlias;
       }
@@ -1391,14 +1395,16 @@
     // two locations do not alias.
     uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
     if (V1Size != MemoryLocation::UnknownSize &&
-        V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
-        V1Size <= Modulo - ModOffset)
+        V2Size != MemoryLocation::UnknownSize &&
+        ModOffset >= V2Size.getValue() &&
+        V1Size.getValue() <= Modulo - ModOffset)
       return NoAlias;

     // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
     // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
     // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
-    if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t)GEP1BaseOffset)
+    if (AllPositive && GEP1BaseOffset > 0 && V2Size.hasValue() &&
+        V2Size.getValue() <= (uint64_t)GEP1BaseOffset)
       return NoAlias;

     if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
@@ -1426,9 +1432,10 @@

 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
 /// against another.
-AliasResult BasicAAResult::aliasSelect(const SelectInst *SI, uint64_t SISize,
+AliasResult BasicAAResult::aliasSelect(const SelectInst *SI,
+                                       LocationSize SISize,
                                        const AAMDNodes &SIAAInfo,
-                                       const Value *V2, uint64_t V2Size,
+                                       const Value *V2, LocationSize V2Size,
                                        const AAMDNodes &V2AAInfo,
                                        const Value *UnderV2) {
   // If the values are Selects with the same condition, we can do a more precise
@@ -1461,9 +1468,10 @@

 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
 /// another.
-AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
+AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                     const AAMDNodes &PNAAInfo, const Value *V2,
-                                    uint64_t V2Size, const AAMDNodes &V2AAInfo,
+                                    LocationSize V2Size,
+                                    const AAMDNodes &V2AAInfo,
                                     const Value *UnderV2) {
   // Track phi nodes we have visited. We use this information when we determine
   // value equivalence.
@@ -1568,9 +1576,9 @@

 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
 /// array references.
-AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
+AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                       AAMDNodes V1AAInfo, const Value *V2,
-                                      uint64_t V2Size, AAMDNodes V2AAInfo,
+                                      LocationSize V2Size, AAMDNodes V2AAInfo,
                                       const Value *O1, const Value *O2) {
   // If either of the memory references is empty, it doesn't matter what the
   // pointer values are.
@@ -1647,10 +1655,10 @@

   // If the size of one access is larger than the entire object on the other
   // side, then we know such behavior is undefined and can assume no alias.
-  if ((V1Size != MemoryLocation::UnknownSize &&
-       isObjectSmallerThan(O2, V1Size, DL, TLI)) ||
-      (V2Size != MemoryLocation::UnknownSize &&
-       isObjectSmallerThan(O1, V2Size, DL, TLI)))
+  if ((V1Size.hasValue() &&
+       isObjectSmallerThan(O2, V1Size.getValue(), DL, TLI)) ||
+      (V2Size.hasValue() &&
+       isObjectSmallerThan(O1, V2Size.getValue(), DL, TLI)))
     return NoAlias;

   // Check the cache before climbing up use-def chains. This also terminates
@@ -1708,10 +1716,9 @@

   // If both pointers are pointing into the same object and one of them
   // accesses the entire object, then the accesses must overlap in some way.
   if (O1 == O2)
-    if (V1Size != MemoryLocation::UnknownSize &&
-        V2Size != MemoryLocation::UnknownSize &&
-        (isObjectSize(O1, V1Size, DL, TLI) ||
-         isObjectSize(O2, V2Size, DL, TLI)))
+    if (V1Size.hasValue() && V2Size.hasValue() &&
+        (isObjectSize(O1, V1Size.getValue(), DL, TLI) ||
+         isObjectSize(O2, V2Size.getValue(), DL, TLI)))
       return AliasCache[Locs] = PartialAlias;

   // Recurse back into the best AA results we have, potentially with refined
@@ -1794,8 +1801,8 @@
 }

 bool BasicAAResult::constantOffsetHeuristic(
-    const SmallVectorImpl<VariableGEPIndex> &VarIndices, uint64_t V1Size,
-    uint64_t V2Size, int64_t BaseOffset, AssumptionCache *AC,
+    const SmallVectorImpl<VariableGEPIndex> &VarIndices, LocationSize V1Size,
+    LocationSize V2Size, int64_t BaseOffset, AssumptionCache *AC,
     DominatorTree *DT) {
   if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
       V2Size == MemoryLocation::UnknownSize)
@@ -1843,8 +1850,8 @@
   // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
   // V2Size can fit in the MinDiffBytes gap.
-  return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
-         V2Size + std::abs(BaseOffset) <= MinDiffBytes;
+  return V1Size.getValue() + std::abs(BaseOffset) <= MinDiffBytes &&
+         V2Size.getValue() + std::abs(BaseOffset) <= MinDiffBytes;
 }

 //===----------------------------------------------------------------------===//
Index: lib/Analysis/CFLAndersAliasAnalysis.cpp
===================================================================
--- lib/Analysis/CFLAndersAliasAnalysis.cpp
+++ lib/Analysis/CFLAndersAliasAnalysis.cpp
@@ -337,7 +337,7 @@
   FunctionInfo(const Function &, const SmallVectorImpl<Value *> &,
                const ReachabilitySet &, const AliasAttrMap &);

-  bool mayAlias(const Value *, uint64_t, const Value *, uint64_t) const;
+  bool mayAlias(const Value *, LocationSize, const Value *, LocationSize) const;

   const AliasSummary &getAliasSummary() const { return Summary; }
 };

@@ -516,9 +516,9 @@
 }

 bool CFLAndersAAResult::FunctionInfo::mayAlias(const Value *LHS,
-                                               uint64_t LHSSize,
+                                               LocationSize LHSSize,
                                                const Value *RHS,
-                                               uint64_t RHSSize) const {
+                                               LocationSize RHSSize) const {
   assert(LHS && RHS);

   // Check if we've seen LHS and RHS before. Sometimes LHS or RHS can be created
@@ -562,6 +562,10 @@
       RHSSize == MemoryLocation::UnknownSize)
     return true;

+  assert(LHSSize.getValue() <= INT64_MAX &&
+         RHSSize.getValue() <= INT64_MAX &&
+         "INT64_MAX is less than a uint64_t's max?");
+
   for (const auto &OVal : make_range(RangePair)) {
     // Be conservative about UnknownOffset
     if (OVal.Offset == UnknownOffset)
@@ -572,15 +576,11 @@
     // range-overlap queries over two ranges [OVal.Offset, OVal.Offset +
     // LHSSize) and [0, RHSSize).

-    // Try to be conservative on super large offsets
-    if (LLVM_UNLIKELY(LHSSize > INT64_MAX || RHSSize > INT64_MAX))
-      return true;
-
-    auto LHSStart = OVal.Offset;
+    int64_t LHSStart = OVal.Offset;
     // FIXME: Do we need to guard against integer overflow?
-    auto LHSEnd = OVal.Offset + static_cast<int64_t>(LHSSize);
-    auto RHSStart = 0;
-    auto RHSEnd = static_cast<int64_t>(RHSSize);
+    int64_t LHSEnd = OVal.Offset + static_cast<int64_t>(LHSSize.getValue());
+    int64_t RHSStart = 0;
+    auto RHSEnd = static_cast<int64_t>(RHSSize.getValue());
     if (LHSEnd > RHSStart && LHSStart < RHSEnd)
       return true;
   }
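
The loop in mayAlias above reduces the query to half-open interval overlap
between [OVal.Offset, OVal.Offset + LHSSize) and [0, RHSSize). The test it
performs, extracted as a standalone sketch (not part of the patch):

  #include <cstdint>

  // Half-open ranges [AStart, AEnd) and [BStart, BEnd) overlap iff each one
  // starts strictly before the other ends.
  bool rangesOverlap(int64_t AStart, int64_t AEnd, int64_t BStart,
                     int64_t BEnd) {
    return AEnd > BStart && AStart < BEnd;
  }
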
Index: lib/Analysis/MemoryDependenceAnalysis.cpp
===================================================================
--- lib/Analysis/MemoryDependenceAnalysis.cpp
+++ lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -1111,21 +1111,33 @@
   // If we already have a cache entry for this CacheKey, we may need to do some
   // work to reconcile the cache entry and the current query.
   if (!Pair.second) {
-    if (CacheInfo->Size < Loc.Size) {
-      // The query's Size is greater than the cached one. Throw out the
-      // cached data and proceed with the query at the greater size.
-      CacheInfo->Pair = BBSkipFirstBlockPair();
-      CacheInfo->Size = Loc.Size;
-      for (auto &Entry : CacheInfo->NonLocalDeps)
-        if (Instruction *Inst = Entry.getResult().getInst())
-          RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
-      CacheInfo->NonLocalDeps.clear();
-    } else if (CacheInfo->Size > Loc.Size) {
-      // This query's Size is less than the cached one. Conservatively restart
-      // the query using the greater size.
-      return getNonLocalPointerDepFromBB(
-          QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad,
-          StartBB, Result, Visited, SkipFirstBlock);
+    if (CacheInfo->Size != Loc.Size) {
+      // For our purposes, unknown size > all others.
+      bool CachedSizeIsGreater;
+      if (CacheInfo->Size.hasValue() != Loc.Size.hasValue()) {
+        CachedSizeIsGreater = Loc.Size.hasValue();
+      } else {
+        assert(CacheInfo->Size.hasValue() && Loc.Size.hasValue() &&
+               "Unequal sizes, but both are None?");
+        CachedSizeIsGreater = CacheInfo->Size.getValue() > Loc.Size.getValue();
+      }
+
+      if (!CachedSizeIsGreater) {
+        // The query's Size is greater than the cached one. Throw out the
+        // cached data and proceed with the query at the greater size.
+        CacheInfo->Pair = BBSkipFirstBlockPair();
+        CacheInfo->Size = Loc.Size;
+        for (auto &Entry : CacheInfo->NonLocalDeps)
+          if (Instruction *Inst = Entry.getResult().getInst())
+            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
+        CacheInfo->NonLocalDeps.clear();
+      } else {
+        // This query's Size is less than the cached one. Conservatively
+        // restart the query using the greater size.
+        return getNonLocalPointerDepFromBB(
+            QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad,
+            StartBB, Result, Visited, SkipFirstBlock);
+      }
     }

     // If the query's AATags are inconsistent with the cached one,
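
Because LocationSize deliberately omits ordering operators, the cache
reconciliation above spells out the one ordering it needs: an unknown size
compares greater than any known one. As a hypothetical standalone helper
(not part of the patch):

  #include "llvm/Analysis/MemoryLocation.h"
  using namespace llvm;

  // Mirrors the CachedSizeIsGreater computation: unknown > any known size;
  // two known sizes compare by value.
  bool sizeIsGreaterThan(LocationSize A, LocationSize B) {
    if (A.hasValue() != B.hasValue())
      return !A.hasValue(); // exactly one is unknown; it is the greater one
    return A.hasValue() && A.getValue() > B.getValue();
  }
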
Index: lib/Analysis/MemoryLocation.cpp
===================================================================
--- lib/Analysis/MemoryLocation.cpp
+++ lib/Analysis/MemoryLocation.cpp
@@ -18,6 +18,19 @@
 #include "llvm/IR/Type.h"
 using namespace llvm;

+void LocationSize::print(raw_ostream &OS) const {
+  OS << "LocationSize(";
+  if (!hasValue())
+    OS << "unknown";
+  else if (*this == mapTombstone())
+    OS << "mapTombstone";
+  else if (*this == mapEmpty())
+    OS << "mapEmpty";
+  else
+    OS << getValue();
+  OS << ')';
+}
+
 MemoryLocation MemoryLocation::get(const LoadInst *LI) {
   AAMDNodes AATags;
   LI->getAAMetadata(AATags);
Index: lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
===================================================================
--- lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
+++ lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
@@ -27,7 +27,7 @@
   // If either of the memory references is empty, it doesn't matter what the
   // pointer values are. This allows the code below to ignore this special
   // case.
-  if (LocA.Size == 0 || LocB.Size == 0)
+  if (LocA.Size.isZero() || LocB.Size.isZero())
     return NoAlias;

   // This is SCEVAAResult. Get the SCEVs!
@@ -40,11 +40,12 @@

   // If something is known about the difference between the two addresses,
   // see if it's enough to prove a NoAlias.
-  if (SE.getEffectiveSCEVType(AS->getType()) ==
-      SE.getEffectiveSCEVType(BS->getType())) {
+  if (LocA.Size.hasValue() && LocB.Size.hasValue() &&
+      SE.getEffectiveSCEVType(AS->getType()) ==
+          SE.getEffectiveSCEVType(BS->getType())) {
     unsigned BitWidth = SE.getTypeSizeInBits(AS->getType());
-    APInt ASizeInt(BitWidth, LocA.Size);
-    APInt BSizeInt(BitWidth, LocB.Size);
+    APInt ASizeInt(BitWidth, LocA.Size.getValue());
+    APInt BSizeInt(BitWidth, LocB.Size.getValue());

     // Compute the difference between the two pointers.
     const SCEV *BA = SE.getMinusSCEV(BS, AS);
Index: lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
===================================================================
--- lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
+++ lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
@@ -1969,7 +1969,7 @@
   // Get the location that may be stored across the loop. Since the access
   // is strided positively through memory, we say that the modified location
   // starts at the pointer and has infinite size.
-  uint64_t AccessSize = MemoryLocation::UnknownSize;
+  LocationSize AccessSize = MemoryLocation::UnknownSize;

   // If the loop iterates a fixed number of times, we can refine the access
   // size to be exactly the size of the memset, which is (BECount+1)*StoreSize
Index: lib/Transforms/Scalar/DeadStoreElimination.cpp
===================================================================
--- lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -335,11 +335,13 @@
                                    int64_t &EarlierOff, int64_t &LaterOff,
                                    Instruction *DepWrite,
                                    InstOverlapIntervalsTy &IOL) {
   // If we don't know the sizes of either access, then we can't do a
   // comparison.
   if (Later.Size == MemoryLocation::UnknownSize ||
       Earlier.Size == MemoryLocation::UnknownSize)
     return OW_Unknown;

+  uint64_t LaterSize = Later.Size.getValue();
+  uint64_t EarlierSize = Earlier.Size.getValue();
+
   const Value *P1 = Earlier.Ptr->stripPointerCasts();
   const Value *P2 = Later.Ptr->stripPointerCasts();

@@ -347,7 +349,7 @@
   // the later store was larger than the earlier store.
   if (P1 == P2) {
     // Make sure that the Later size is >= the Earlier size.
-    if (Later.Size >= Earlier.Size)
+    if (LaterSize >= EarlierSize)
       return OW_Complete;
   }

@@ -365,7 +367,7 @@
   // If the "Later" store is to a recognizable object, get its size.
   uint64_t ObjectSize = getPointerSize(UO2, DL, TLI);
   if (ObjectSize != MemoryLocation::UnknownSize)
-    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
+    if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
       return OW_Complete;

   // Okay, we have stores to two completely different pointers.  Try to
@@ -396,8 +398,8 @@
   //
   // We have to be careful here as *Off is signed while *.Size is unsigned.
   if (EarlierOff >= LaterOff &&
-      Later.Size >= Earlier.Size &&
-      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
+      LaterSize >= EarlierSize &&
+      uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
     return OW_Complete;

   // We may now overlap, although the overlap is not complete. There might also
@@ -406,20 +408,20 @@
   // Note: The correctness of this logic depends on the fact that this function
   // is not even called providing DepWrite when there are any intervening reads.
   if (EnablePartialOverwriteTracking &&
-      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
-      int64_t(LaterOff + Later.Size) >= EarlierOff) {
+      LaterOff < int64_t(EarlierOff + EarlierSize) &&
+      int64_t(LaterOff + LaterSize) >= EarlierOff) {

     // Insert our part of the overlap into the map.
     auto &IM = IOL[DepWrite];
     DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff << ", " <<
-          int64_t(EarlierOff + Earlier.Size) << ") Later [" <<
-          LaterOff << ", " << int64_t(LaterOff + Later.Size) << ")\n");
+          int64_t(EarlierOff + EarlierSize) << ") Later [" <<
+          LaterOff << ", " << int64_t(LaterOff + LaterSize) << ")\n");

     // Make sure that we only insert non-overlapping intervals and combine
     // adjacent intervals. The intervals are stored in the map with the ending
     // offset as the key (in the half-open sense) and the starting offset as
     // the value.
-    int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + Later.Size;
+    int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;

     // Find any intervals ending at, or after, LaterIntStart which start
     // before LaterIntEnd.
@@ -449,10 +451,10 @@
       ILI = IM.begin();

     if (ILI->second <= EarlierOff &&
-        ILI->first >= int64_t(EarlierOff + Earlier.Size)) {
+        ILI->first >= int64_t(EarlierOff + EarlierSize)) {
       DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier [" <<
             EarlierOff << ", " <<
-            int64_t(EarlierOff + Earlier.Size) <<
+            int64_t(EarlierOff + EarlierSize) <<
             ") Composite Later [" <<
             ILI->second << ", " << ILI->first << ")\n");
       ++NumCompletePartials;
@@ -463,12 +465,12 @@

   // Check for an earlier store which writes to all the memory locations that
   // the later store writes to.
   if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
-      int64_t(EarlierOff + Earlier.Size) > LaterOff &&
-      uint64_t(LaterOff - EarlierOff) + Later.Size <= Earlier.Size) {
+      int64_t(EarlierOff + EarlierSize) > LaterOff &&
+      uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
     DEBUG(dbgs() << "DSE: Partial overwrite an earlier load [" << EarlierOff
-                 << ", " << int64_t(EarlierOff + Earlier.Size)
+                 << ", " << int64_t(EarlierOff + EarlierSize)
                  << ") by a later store [" << LaterOff << ", "
-                 << int64_t(LaterOff + Later.Size) << ")\n");
+                 << int64_t(LaterOff + LaterSize) << ")\n");
     // TODO: Maybe come up with a better name?
     return OW_PartialEarlierWithFullLater;
   }

@@ -482,8 +484,8 @@
   // In this case we may want to trim the size of earlier to avoid generating
   // writes to addresses which will definitely be overwritten later
   if (!EnablePartialOverwriteTracking &&
-      (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + Earlier.Size) &&
-       int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size)))
+      (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
+       int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
     return OW_End;

   // Finally, we also need to check if the later store overwrites the beginning
@@ -496,9 +498,8 @@
   // of earlier to avoid generating writes to addresses which will definitely
   // be overwritten later.
   if (!EnablePartialOverwriteTracking &&
-      (LaterOff <= EarlierOff && int64_t(LaterOff + Later.Size) > EarlierOff)) {
-    assert(int64_t(LaterOff + Later.Size) <
-               int64_t(EarlierOff + Earlier.Size) &&
+      (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
+    assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
            "Expect to be handled as OW_Complete");
     return OW_Begin;
   }
@@ -976,11 +977,10 @@
     Instruction *EarlierWrite = OI.first;
     MemoryLocation Loc = getLocForWrite(EarlierWrite);
     assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
-    assert(Loc.Size != MemoryLocation::UnknownSize && "Unexpected mem loc");

+    auto EarlierSize = int64_t(Loc.Size.getValue());
     const Value *Ptr = Loc.Ptr->stripPointerCasts();
     int64_t EarlierStart = 0;
-    int64_t EarlierSize = int64_t(Loc.Size);
     GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
     OverlapIntervalsTy &IntervalMap = OI.second;
     Changed |=
@@ -1158,6 +1158,11 @@
         OverwriteResult OR = isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset,
                                          InstWriteOffset, DepWrite, IOL);
+
+        if (OR != OW_Unknown)
+          assert(Loc.Size.hasValue() && DepLoc.Size.hasValue() &&
+                 "We shouldn't classify overwrites of unknown size!");
+
         if (OR == OW_Complete) {
           DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *DepWrite
                        << "\n  KILLER: " << *Inst << '\n');
@@ -1176,8 +1181,9 @@
           assert(!EnablePartialOverwriteTracking && "Do not expect to perform "
                                                     "when partial-overwrite "
                                                     "tracking is enabled");
-          int64_t EarlierSize = DepLoc.Size;
-          int64_t LaterSize = Loc.Size;
+          // The overwrite result is known, so these must be known, too.
+          int64_t EarlierSize = DepLoc.Size.getValue();
+          int64_t LaterSize = Loc.Size.getValue();
           bool IsOverwriteEnd = (OR == OW_End);
           MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
                                      InstWriteOffset, LaterSize, IsOverwriteEnd);
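
Most of the rewritten checks in isOverwrite are interval queries over
[EarlierOff, EarlierOff + EarlierSize) and [LaterOff, LaterOff + LaterSize).
For instance, the OW_Complete containment test, extracted as a sketch (not
part of the patch):

  #include <cstdint>

  // The later store completely overwrites the earlier one iff the earlier
  // half-open interval is contained in the later one.
  bool completelyOverwrites(int64_t EarlierOff, uint64_t EarlierSize,
                            int64_t LaterOff, uint64_t LaterSize) {
    return LaterOff <= EarlierOff && LaterSize >= EarlierSize &&
           uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize;
  }
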
Index: test/Analysis/AliasSet/intrinsics.ll
===================================================================
--- test/Analysis/AliasSet/intrinsics.ll
+++ test/Analysis/AliasSet/intrinsics.ll
@@ -2,9 +2,9 @@

 ; CHECK: Alias sets for function 'test1':
 ; CHECK: Alias Set Tracker: 2 alias sets for 2 pointer values.
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %a, 1)
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %a, LocationSize(1))
 ; CHECK-NOT: 1 Unknown instruction
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %b, 1)
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %b, LocationSize(1))
 define void @test1(i32 %c) {
 entry:
   %a = alloca i8, align 1
Index: test/Analysis/AliasSet/memtransfer.ll
===================================================================
--- test/Analysis/AliasSet/memtransfer.ll
+++ test/Analysis/AliasSet/memtransfer.ll
@@ -5,10 +5,10 @@

 ; CHECK: Alias sets for function 'test1':
 ; CHECK: Alias Set Tracker: 3 alias sets for 4 pointer values.
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %a, 1)
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %a, LocationSize(1))
 ; CHECK-NOT: 1 Unknown instructions
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 2] may alias, Mod/Ref Pointers: (i8* %s, 1), (i8* %d, 1)
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %b, 1)
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 2] may alias, Mod/Ref Pointers: (i8* %s, LocationSize(1)), (i8* %d, LocationSize(1))
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %b, LocationSize(1))
 define void @test1(i8* %s, i8* %d) {
 entry:
   %a = alloca i8, align 1
@@ -21,10 +21,10 @@

 ; CHECK: Alias sets for function 'test2':
 ; CHECK: Alias Set Tracker: 3 alias sets for 4 pointer values.
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %a, 1)
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %a, LocationSize(1))
 ; CHECK-NOT: 1 Unknown instructions
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 2] may alias, Mod/Ref [volatile] Pointers: (i8* %s, 1), (i8* %d, 1)
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %b, 1)
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 2] may alias, Mod/Ref [volatile] Pointers: (i8* %s, LocationSize(1)), (i8* %d, LocationSize(1))
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %b, LocationSize(1))
 define void @test2(i8* %s, i8* %d) {
 entry:
   %a = alloca i8, align 1
@@ -37,10 +37,10 @@

 ; CHECK: Alias sets for function 'test3':
 ; CHECK: Alias Set Tracker: 3 alias sets for 4 pointer values.
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %a, 1)
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %a, LocationSize(1))
 ; CHECK-NOT: 1 Unknown instructions
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 2] may alias, Mod/Ref Pointers: (i8* %s, 1), (i8* %d, 1)
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %b, 1)
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 2] may alias, Mod/Ref Pointers: (i8* %s, LocationSize(1)), (i8* %d, LocationSize(1))
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %b, LocationSize(1))
 define void @test3(i8* %s, i8* %d) {
 entry:
   %a = alloca i8, align 1
@@ -53,10 +53,10 @@

 ; CHECK: Alias sets for function 'test4':
 ; CHECK: Alias Set Tracker: 3 alias sets for 4 pointer values.
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %a, 1)
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %a, LocationSize(1))
 ; CHECK-NOT: 1 Unknown instructions
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 2] may alias, Mod/Ref [volatile] Pointers: (i8* %s, 1), (i8* %d, 1)
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %b, 1)
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 2] may alias, Mod/Ref [volatile] Pointers: (i8* %s, LocationSize(1)), (i8* %d, LocationSize(1))
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %b, LocationSize(1))
 define void @test4(i8* %s, i8* %d) {
 entry:
   %a = alloca i8, align 1
@@ -69,8 +69,8 @@

 ; CHECK: Alias sets for function 'test5':
 ; CHECK: Alias Set Tracker: 2 alias sets for 2 pointer values.
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod/Ref Pointers: (i8* %a, 1)
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %b, 1)
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod/Ref Pointers: (i8* %a, LocationSize(1))
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %b, LocationSize(1))
 define void @test5() {
 entry:
   %a = alloca i8, align 1
@@ -83,8 +83,8 @@

 ; CHECK: Alias sets for function 'test6':
 ; CHECK: Alias Set Tracker: 2 alias sets for 2 pointer values.
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod/Ref Pointers: (i8* %a, 1)
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %b, 1)
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod/Ref Pointers: (i8* %a, LocationSize(1))
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod Pointers: (i8* %b, LocationSize(1))
 define void @test6() {
 entry:
   %a = alloca i8, align 1
@@ -97,8 +97,8 @@

 ; CHECK: Alias sets for function 'test7':
 ; CHECK: Alias Set Tracker: 2 alias sets for 2 pointer values.
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod/Ref Pointers: (i8* %a, 1)
-; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod/Ref Pointers: (i8* %b, 1)
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod/Ref Pointers: (i8* %a, LocationSize(1))
+; CHECK: AliasSet[0x{{[0-9a-f]+}}, 1] must alias, Mod/Ref Pointers: (i8* %b, LocationSize(1))
 define void @test7() {
 entry:
   %a = alloca i8, align 1
Index: test/Analysis/AliasSet/saturation.ll
===================================================================
--- test/Analysis/AliasSet/saturation.ll
+++ test/Analysis/AliasSet/saturation.ll
@@ -2,10 +2,10 @@
 ; RUN: opt -basicaa -print-alias-sets -alias-set-saturation-threshold=1 -S -o - < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=SAT

 ; CHECK-LABEL: 'allmust'
-; CHECK: AliasSet[{{.*}}, 1] must alias, Mod Pointers: (i32* %a, 4)
-; CHECK: AliasSet[{{.*}}, 1] must alias, Mod Pointers: (i32* %b, 4)
-; CHECK: AliasSet[{{.*}}, 1] must alias, Mod Pointers: (i32* %c, 4)
-; CHECK: AliasSet[{{.*}}, 1] must alias, Mod Pointers: (i32* %d, 4)
+; CHECK: AliasSet[{{.*}}, 1] must alias, Mod Pointers: (i32* %a, LocationSize(4))
+; CHECK: AliasSet[{{.*}}, 1] must alias, Mod Pointers: (i32* %b, LocationSize(4))
+; CHECK: AliasSet[{{.*}}, 1] must alias, Mod Pointers: (i32* %c, LocationSize(4))
+; CHECK: AliasSet[{{.*}}, 1] must alias, Mod Pointers: (i32* %d, LocationSize(4))
 define void @allmust() {
   %a = alloca i32
   %b = alloca i32
@@ -19,11 +19,11 @@
 }

 ; CHECK-LABEL: 'mergemay'
-; NOSAT: AliasSet[{{.*}}, 2] may alias, Mod Pointers: (i32* %a, 4), (i32* %a1, 4)
-; NOSAT: AliasSet[{{.*}}, 1] must alias, Mod Pointers: (i32* %b, 4)
+; NOSAT: AliasSet[{{.*}}, 2] may alias, Mod Pointers: (i32* %a, LocationSize(4)), (i32* %a1, LocationSize(4))
+; NOSAT: AliasSet[{{.*}}, 1] must alias, Mod Pointers: (i32* %b, LocationSize(4))
 ; SAT: AliasSet[{{.*}}, 2] may alias, Mod forwarding to 0x[[FWD:[0-9a-f]*]]
 ; SAT: AliasSet[{{.*}}, 1] must alias, Mod forwarding to 0x[[FWD]]
-; SAT: AliasSet[0x[[FWD]], 2] may alias, Mod/Ref Pointers: (i32* %a, 4), (i32* %a1, 4), (i32* %b, 4)
+; SAT: AliasSet[0x[[FWD]], 2] may alias, Mod/Ref Pointers: (i32* %a, LocationSize(4)), (i32* %a1, LocationSize(4)), (i32* %b, LocationSize(4))
 define void @mergemay(i32 %k) {
   %a = alloca i32
   %b = alloca i32
@@ -35,13 +35,13 @@
 }

 ; CHECK-LABEL: 'mergemust'
-; NOSAT: AliasSet[{{.*}}, 1] must alias, Mod Pointers: (i32* %a, 4)
-; NOSAT: AliasSet[{{.*}}, 1] must alias, Mod Pointers: (i32* %b, 4)
-; NOSAT: AliasSet[{{.*}}, 2] may alias, Mod Pointers: (i32* %c, 4), (i32* %d, 4)
+; NOSAT: AliasSet[{{.*}}, 1] must alias, Mod Pointers: (i32* %a, LocationSize(4))
+; NOSAT: AliasSet[{{.*}}, 1] must alias, Mod Pointers: (i32* %b, LocationSize(4))
+; NOSAT: AliasSet[{{.*}}, 2] may alias, Mod Pointers: (i32* %c, LocationSize(4)), (i32* %d, LocationSize(4))
 ; SAT: AliasSet[{{.*}}, 1] must alias, Mod forwarding to 0x[[FWD:[0-9a-f]*]]
 ; SAT: AliasSet[{{.*}}, 1] must alias, Mod forwarding to 0x[[FWD]]
 ; SAT: AliasSet[{{.*}}, 2] may alias, Mod forwarding to 0x[[FWD]]
-; SAT: AliasSet[0x[[FWD]], 3] may alias, Mod/Ref Pointers: (i32* %a, 4), (i32* %b, 4), (i32* %c, 4), (i32* %d, 4)
+; SAT: AliasSet[0x[[FWD]], 3] may alias, Mod/Ref Pointers: (i32* %a, LocationSize(4)), (i32* %b, LocationSize(4)), (i32* %c, LocationSize(4)), (i32* %d, LocationSize(4))
 define void @mergemust(i32* %c, i32* %d) {
   %a = alloca i32
   %b = alloca i32
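
All of the FileCheck updates in these tests follow mechanically from the new
LocationSize printer added in lib/Analysis/MemoryLocation.cpp: sizes in
alias-set dumps are now rendered as "LocationSize(N)" rather than a bare
integer. A minimal sketch of what the printer emits (not part of the patch):

  #include "llvm/Analysis/MemoryLocation.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  int main() {
    outs() << LocationSize(4) << '\n';         // prints: LocationSize(4)
    outs() << LocationSize::unknown() << '\n'; // prints: LocationSize(unknown)
  }
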