Index: llvm/trunk/include/llvm/ADT/PointerSumType.h =================================================================== --- llvm/trunk/include/llvm/ADT/PointerSumType.h +++ llvm/trunk/include/llvm/ADT/PointerSumType.h @@ -58,56 +58,140 @@ /// and may be desirable to set to a state that is particularly desirable to /// default construct. /// +/// Having a supported zero-valued tag also enables getting the address of a +/// pointer stored with that tag provided it is stored in its natural bit +/// representation. This works because in the case of a zero-valued tag, the +/// pointer's value is directly stored into this object and we can expose the +/// address of that internal storage. This is especially useful when building an +/// `ArrayRef` of a single pointer stored in a sum type. +/// /// There is no support for constructing or accessing with a dynamic tag as /// that would fundamentally violate the type safety provided by the sum type. template class PointerSumType { - uintptr_t Value = 0; - using HelperT = detail::PointerSumTypeHelper; + // We keep both the raw value and the min tag value's pointer in a union. When + // the minimum tag value is zero, this allows code below to cleanly expose the + // address of the zero-tag pointer instead of just the zero-tag pointer + // itself. This is especially useful when building `ArrayRef`s out of a single + // pointer. However, we have to carefully access the union due to the active + // member potentially changing. When we *store* a new value, we directly + // access the union to allow us to store using the obvious types. However, + // when we *read* a value, we copy the underlying storage out to avoid relying + // on one member or the other being active. + union StorageT { + // Ensure we get a null default constructed value. 
+ uintptr_t Value = 0; + + typename HelperT::template Lookup::PointerT MinTagPointer; + }; + + StorageT Storage; + public: constexpr PointerSumType() = default; + /// A typed setter to a given tagged member of the sum type. + template + void set(typename HelperT::template Lookup::PointerT Pointer) { + void *V = HelperT::template Lookup::TraitsT::getAsVoidPointer(Pointer); + assert((reinterpret_cast(V) & HelperT::TagMask) == 0 && + "Pointer is insufficiently aligned to store the discriminant!"); + Storage.Value = reinterpret_cast(V) | N; + } + /// A typed constructor for a specific tagged member of the sum type. template static PointerSumType create(typename HelperT::template Lookup::PointerT Pointer) { PointerSumType Result; - void *V = HelperT::template Lookup::TraitsT::getAsVoidPointer(Pointer); - assert((reinterpret_cast(V) & HelperT::TagMask) == 0 && - "Pointer is insufficiently aligned to store the discriminant!"); - Result.Value = reinterpret_cast(V) | N; + Result.set(Pointer); return Result; } - TagT getTag() const { return static_cast(Value & HelperT::TagMask); } + /// Clear the value to null with the min tag type. + void clear() { set(nullptr); } + + TagT getTag() const { + return static_cast(getOpaqueValue() & HelperT::TagMask); + } template bool is() const { return N == getTag(); } template typename HelperT::template Lookup::PointerT get() const { - void *P = is() ? getImpl() : nullptr; + void *P = is() ? getVoidPtr() : nullptr; return HelperT::template Lookup::TraitsT::getFromVoidPointer(P); } template typename HelperT::template Lookup::PointerT cast() const { assert(is() && "This instance has a different active member."); - return HelperT::template Lookup::TraitsT::getFromVoidPointer(getImpl()); + return HelperT::template Lookup::TraitsT::getFromVoidPointer( + getVoidPtr()); + } + + /// If the tag is zero and the pointer's value isn't changed when being + /// stored, get the address of the stored value type-punned to the zero-tag's + /// pointer type. 
+ typename HelperT::template Lookup::PointerT const * + getAddrOfZeroTagPointer() const { + return const_cast(this)->getAddrOfZeroTagPointer(); } - explicit operator bool() const { return Value & HelperT::PointerMask; } - bool operator==(const PointerSumType &R) const { return Value == R.Value; } - bool operator!=(const PointerSumType &R) const { return Value != R.Value; } - bool operator<(const PointerSumType &R) const { return Value < R.Value; } - bool operator>(const PointerSumType &R) const { return Value > R.Value; } - bool operator<=(const PointerSumType &R) const { return Value <= R.Value; } - bool operator>=(const PointerSumType &R) const { return Value >= R.Value; } + /// If the tag is zero and the pointer's value isn't changed when being + /// stored, get the address of the stored value type-punned to the zero-tag's + /// pointer type. + typename HelperT::template Lookup::PointerT * + getAddrOfZeroTagPointer() { + static_assert(HelperT::MinTag == 0, "Non-zero minimum tag value!"); + assert(is() && "The active tag is not zero!"); + // Store the initial value of the pointer when read out of our storage. + auto InitialPtr = get(); + // Now update the active member of the union to be the actual pointer-typed + // member so that accessing it indirectly through the returned address is + // valid. + Storage.MinTagPointer = InitialPtr; + // Finally, validate that this was a no-op as expected by reading it back + // out using the same underlying-storage read as above. + assert(InitialPtr == get() && + "Switching to typed storage changed the pointer returned!"); + // Now we can correctly return an address to typed storage. 
+ return &Storage.MinTagPointer; + } + + explicit operator bool() const { + return getOpaqueValue() & HelperT::PointerMask; + } + bool operator==(const PointerSumType &R) const { + return getOpaqueValue() == R.getOpaqueValue(); + } + bool operator!=(const PointerSumType &R) const { + return getOpaqueValue() != R.getOpaqueValue(); + } + bool operator<(const PointerSumType &R) const { + return getOpaqueValue() < R.getOpaqueValue(); + } + bool operator>(const PointerSumType &R) const { + return getOpaqueValue() > R.getOpaqueValue(); + } + bool operator<=(const PointerSumType &R) const { + return getOpaqueValue() <= R.getOpaqueValue(); + } + bool operator>=(const PointerSumType &R) const { + return getOpaqueValue() >= R.getOpaqueValue(); + } - uintptr_t getOpaqueValue() const { return Value; } + uintptr_t getOpaqueValue() const { + uintptr_t Value; + // Read the underlying storage of the union, regardless of the active + // member. + memcpy(&Value, &Storage, sizeof(Value)); + return Value; + } protected: - void *getImpl() const { - return reinterpret_cast(Value & HelperT::PointerMask); + void *getVoidPtr() const { + return reinterpret_cast(getOpaqueValue() & HelperT::PointerMask); } }; @@ -151,8 +235,9 @@ enum { NumTagBits = Min::value }; // Also compute the smallest discriminant and various masks for convenience. + constexpr static TagT MinTag = + static_cast(Min::value); enum : uint64_t { - MinTag = Min::value, PointerMask = static_cast(-1) << NumTagBits, TagMask = ~PointerMask }; Index: llvm/trunk/include/llvm/CodeGen/MachineFunction.h =================================================================== --- llvm/trunk/include/llvm/CodeGen/MachineFunction.h +++ llvm/trunk/include/llvm/CodeGen/MachineFunction.h @@ -711,23 +711,14 @@ /// Allocate and initialize a register mask with @p NumRegister bits. uint32_t *allocateRegMask(); - /// allocateMemRefsArray - Allocate an array to hold MachineMemOperand - /// pointers. This array is owned by the MachineFunction. 
- MachineInstr::mmo_iterator allocateMemRefsArray(unsigned long Num); - - /// extractLoadMemRefs - Allocate an array and populate it with just the - /// load information from the given MachineMemOperand sequence. - std::pair - extractLoadMemRefs(MachineInstr::mmo_iterator Begin, - MachineInstr::mmo_iterator End); - - /// extractStoreMemRefs - Allocate an array and populate it with just the - /// store information from the given MachineMemOperand sequence. - std::pair - extractStoreMemRefs(MachineInstr::mmo_iterator Begin, - MachineInstr::mmo_iterator End); + /// Allocate and construct an extra info structure for a `MachineInstr`. + /// + /// This is allocated on the function's allocator and so lives the life of + /// the function. + MachineInstr::ExtraInfo * + createMIExtraInfo(ArrayRef MMOs, + MCSymbol *PreInstrSymbol = nullptr, + MCSymbol *PostInstrSymbol = nullptr); /// Allocate a string and populate it with the given external symbol name. const char *createExternalSymbolName(StringRef Name); Index: llvm/trunk/include/llvm/CodeGen/MachineInstr.h =================================================================== --- llvm/trunk/include/llvm/CodeGen/MachineInstr.h +++ llvm/trunk/include/llvm/CodeGen/MachineInstr.h @@ -17,16 +17,20 @@ #define LLVM_CODEGEN_MACHINEINSTR_H #include "llvm/ADT/DenseMapInfo.h" +#include "llvm/ADT/PointerSumType.h" #include "llvm/ADT/ilist.h" #include "llvm/ADT/ilist_node.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Analysis/AliasAnalysis.h" +#include "llvm/CodeGen/MachineMemOperand.h" #include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/TargetOpcodes.h" #include "llvm/IR/DebugLoc.h" #include "llvm/IR/InlineAsm.h" #include "llvm/MC/MCInstrDesc.h" +#include "llvm/MC/MCSymbol.h" #include "llvm/Support/ArrayRecycler.h" +#include "llvm/Support/TrailingObjects.h" #include #include #include @@ -61,7 +65,7 @@ : public ilist_node_with_parent> { public: - using mmo_iterator = MachineMemOperand **; + using mmo_iterator = 
ArrayRef::iterator; /// Flags to specify different kinds of comments to output in /// assembly code. These flags carry semantic information not @@ -118,14 +122,102 @@ // anything other than to convey comment // information to AsmPrinter. - uint8_t NumMemRefs = 0; // Information on memory references. - // Note that MemRefs == nullptr, means 'don't know', not 'no memory access'. - // Calling code must treat missing information conservatively. If the number - // of memory operands required to be precise exceeds the maximum value of - // NumMemRefs - currently 256 - we remove the operands entirely. Note also - // that this is a non-owning reference to a shared copy on write buffer owned - // by the MachineFunction and created via MF.allocateMemRefsArray. - mmo_iterator MemRefs = nullptr; + /// Internal implementation detail class that provides out-of-line storage for + /// extra info used by the machine instruction when this info cannot be stored + /// in-line within the instruction itself. + /// + /// This has to be defined eagerly due to the implementation constraints of + /// `PointerSumType` where it is used. + class ExtraInfo final + : TrailingObjects { + public: + static ExtraInfo *create(BumpPtrAllocator &Allocator, + ArrayRef MMOs, + MCSymbol *PreInstrSymbol = nullptr, + MCSymbol *PostInstrSymbol = nullptr) { + bool HasPreInstrSymbol = PreInstrSymbol != nullptr; + bool HasPostInstrSymbol = PostInstrSymbol != nullptr; + auto *Result = new (Allocator.Allocate( + totalSizeToAlloc( + MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol), + alignof(ExtraInfo))) + ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol); + + // Copy the actual data into the trailing objects. 
+ std::copy(MMOs.begin(), MMOs.end(), + Result->getTrailingObjects()); + + if (HasPreInstrSymbol) + Result->getTrailingObjects()[0] = PreInstrSymbol; + if (HasPostInstrSymbol) + Result->getTrailingObjects()[HasPreInstrSymbol] = + PostInstrSymbol; + + return Result; + } + + ArrayRef getMMOs() const { + return makeArrayRef(getTrailingObjects(), NumMMOs); + } + + MCSymbol *getPreInstrSymbol() const { + return HasPreInstrSymbol ? getTrailingObjects()[0] : nullptr; + } + + MCSymbol *getPostInstrSymbol() const { + return HasPostInstrSymbol + ? getTrailingObjects()[HasPreInstrSymbol] + : nullptr; + } + + private: + friend TrailingObjects; + + // Description of the extra info, used to interpret the actual optional + // data appended. + // + // Note that this is not terribly space optimized. This leaves a great deal + // of flexibility to fit more in here later. + const int NumMMOs; + const bool HasPreInstrSymbol; + const bool HasPostInstrSymbol; + + // Implement the `TrailingObjects` internal API. + size_t numTrailingObjects(OverloadToken) const { + return NumMMOs; + } + size_t numTrailingObjects(OverloadToken) const { + return HasPreInstrSymbol + HasPostInstrSymbol; + } + + // Just a boring constructor to allow us to initialize the sizes. Always use + // the `create` routine above. + ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol) + : NumMMOs(NumMMOs), HasPreInstrSymbol(HasPreInstrSymbol), + HasPostInstrSymbol(HasPostInstrSymbol) {} + }; + + /// Enumeration of the kinds of inline extra info available. It is important + /// that the `MachineMemOperand` inline kind has a tag value of zero to make + /// it accessible as an `ArrayRef`. + enum ExtraInfoInlineKinds { + EIIK_MMO = 0, + EIIK_PreInstrSymbol, + EIIK_PostInstrSymbol, + EIIK_OutOfLine + }; + + // We store extra information about the instruction here. The common case is + // expected to be nothing or a single pointer (typically a MMO or a symbol). 
+ // We work to optimize this common case by storing it inline here rather than + // requiring a separate allocation, but we fall back to an allocation when + // multiple pointers are needed. + PointerSumType, + PointerSumTypeMember, + PointerSumTypeMember, + PointerSumTypeMember> + Info; DebugLoc debugLoc; // Source line information. @@ -412,28 +504,70 @@ return I - operands_begin(); } - /// Access to memory operands of the instruction - mmo_iterator memoperands_begin() const { return MemRefs; } - mmo_iterator memoperands_end() const { return MemRefs + NumMemRefs; } + /// Access to memory operands of the instruction. If there are none, that does + /// not imply anything about whether the function accesses memory. Instead, + /// the caller must behave conservatively. + ArrayRef memoperands() const { + if (!Info) + return {}; + + if (Info.is()) + return makeArrayRef(Info.getAddrOfZeroTagPointer(), 1); + + if (ExtraInfo *EI = Info.get()) + return EI->getMMOs(); + + return {}; + } + + /// Access to memory operands of the instruction. + /// + /// If `memoperands_begin() == memoperands_end()`, that does not imply + /// anything about whether the function accesses memory. Instead, the caller + /// must behave conservatively. + mmo_iterator memoperands_begin() const { return memoperands().begin(); } + + /// Access to memory operands of the instruction. + /// + /// If `memoperands_begin() == memoperands_end()`, that does not imply + /// anything about whether the function accesses memory. Instead, the caller + /// must behave conservatively. + mmo_iterator memoperands_end() const { return memoperands().end(); } + /// Return true if we don't have any memory operands which described the /// memory access done by this instruction. If this is true, calling code /// must be conservative. 
- bool memoperands_empty() const { return NumMemRefs == 0; } - - iterator_range memoperands() { - return make_range(memoperands_begin(), memoperands_end()); - } - iterator_range memoperands() const { - return make_range(memoperands_begin(), memoperands_end()); - } + bool memoperands_empty() const { return memoperands().empty(); } /// Return true if this instruction has exactly one MachineMemOperand. - bool hasOneMemOperand() const { - return NumMemRefs == 1; - } + bool hasOneMemOperand() const { return memoperands().size() == 1; } /// Return the number of memory operands. - unsigned getNumMemOperands() const { return NumMemRefs; } + unsigned getNumMemOperands() const { return memoperands().size(); } + + /// Helper to extract a pre-instruction symbol if one has been added. + MCSymbol *getPreInstrSymbol() const { + if (!Info) + return nullptr; + if (MCSymbol *S = Info.get()) + return S; + if (ExtraInfo *EI = Info.get()) + return EI->getPreInstrSymbol(); + + return nullptr; + } + + /// Helper to extract a post-instruction symbol if one has been added. + MCSymbol *getPostInstrSymbol() const { + if (!Info) + return nullptr; + if (MCSymbol *S = Info.get()) + return S; + if (ExtraInfo *EI = Info.get()) + return EI->getPostInstrSymbol(); + + return nullptr; + } /// API for querying MachineInstr properties. They are the same as MCInstrDesc /// queries but they are bundle aware. @@ -1323,48 +1457,59 @@ /// fewer operand than it started with. void RemoveOperand(unsigned OpNo); + /// Clear this MachineInstr's memory reference descriptor list. This resets + /// the memrefs to their most conservative state. This should be used only + /// as a last resort since it greatly pessimizes our knowledge of the memory + /// access performed by the instruction. + void dropMemRefs(MachineFunction &MF); + + /// Assign this MachineInstr's memory reference descriptor list. 
+ /// + /// Unlike other methods, this *will* allocate them into a new array + /// associated with the provided `MachineFunction`. + void setMemRefs(MachineFunction &MF, ArrayRef MemRefs); + /// Add a MachineMemOperand to the machine instruction. /// This function should be used only occasionally. The setMemRefs function /// is the primary method for setting up a MachineInstr's MemRefs list. void addMemOperand(MachineFunction &MF, MachineMemOperand *MO); - /// Assign this MachineInstr's memory reference descriptor list. - /// This does not transfer ownership. - void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) { - setMemRefs(std::make_pair(NewMemRefs, NewMemRefsEnd-NewMemRefs)); - } - - /// Assign this MachineInstr's memory reference descriptor list. First - /// element in the pair is the begin iterator/pointer to the array; the - /// second is the number of MemoryOperands. This does not transfer ownership - /// of the underlying memory. - void setMemRefs(std::pair NewMemRefs) { - MemRefs = NewMemRefs.first; - NumMemRefs = uint8_t(NewMemRefs.second); - assert(NumMemRefs == NewMemRefs.second && - "Too many memrefs - must drop memory operands"); - } - - /// Return a set of memrefs (begin iterator, size) which conservatively - /// describe the memory behavior of both MachineInstrs. This is appropriate - /// for use when merging two MachineInstrs into one. This routine does not - /// modify the memrefs of the this MachineInstr. - std::pair mergeMemRefsWith(const MachineInstr& Other); + /// Clone another MachineInstr's memory reference descriptor list and replace + /// ours with it. + /// + /// Note that `*this` may be the incoming MI! + /// + /// Prefer this API whenever possible as it can avoid allocations in common + /// cases. + void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI); + + /// Clone the merge of multiple MachineInstrs' memory reference descriptors + /// list and replace ours with it. 
+ /// + /// Note that `*this` may be one of the incoming MIs! + /// + /// Prefer this API whenever possible as it can avoid allocations in common + /// cases. + void cloneMergedMemRefs(MachineFunction &MF, + ArrayRef MIs); + + /// Get or create a temporary symbol that will be emitted just prior to the + /// instruction itself. + /// + /// FIXME: This is not fully implemented yet. + MCSymbol *getOrCreatePreInstrTempSymbol(MCContext &MCCtx); + + /// Get or create a temporary symbol that will be emitted just after the + /// instruction itself. + /// + /// FIXME: This is not fully implemented yet. + MCSymbol *getOrCreatePostInstrTempSymbol(MCContext &MCCtx); /// Return the MIFlags which represent both MachineInstrs. This /// should be used when merging two MachineInstrs into one. This routine does /// not modify the MIFlags of this MachineInstr. uint16_t mergeFlagsWith(const MachineInstr& Other) const; - /// Clear this MachineInstr's memory reference descriptor list. This resets - /// the memrefs to their most conservative state. This should be used only - /// as a last resort since it greatly pessimizes our knowledge of the memory - /// access performed by the instruction. - void dropMemRefs() { - MemRefs = nullptr; - NumMemRefs = 0; - } - /// Break any tie involving OpIdx. 
void untieRegOperand(unsigned OpIdx) { MachineOperand &MO = getOperand(OpIdx); Index: llvm/trunk/include/llvm/CodeGen/MachineInstrBuilder.h =================================================================== --- llvm/trunk/include/llvm/CodeGen/MachineInstrBuilder.h +++ llvm/trunk/include/llvm/CodeGen/MachineInstrBuilder.h @@ -191,15 +191,20 @@ return *this; } - const MachineInstrBuilder &setMemRefs(MachineInstr::mmo_iterator b, - MachineInstr::mmo_iterator e) const { - MI->setMemRefs(b, e); + const MachineInstrBuilder & + setMemRefs(ArrayRef MMOs) const { + MI->setMemRefs(*MF, MMOs); return *this; } - const MachineInstrBuilder &setMemRefs(std::pair MemOperandsRef) const { - MI->setMemRefs(MemOperandsRef); + const MachineInstrBuilder &cloneMemRefs(const MachineInstr &OtherMI) const { + MI->cloneMemRefs(*MF, OtherMI); + return *this; + } + + const MachineInstrBuilder & + cloneMergedMemRefs(ArrayRef OtherMIs) const { + MI->cloneMergedMemRefs(*MF, OtherMIs); return *this; } Index: llvm/trunk/lib/CodeGen/BranchFolding.cpp =================================================================== --- llvm/trunk/lib/CodeGen/BranchFolding.cpp +++ llvm/trunk/lib/CodeGen/BranchFolding.cpp @@ -865,7 +865,7 @@ // Merge MMOs from memory operations in the common block. if (MBBICommon->mayLoad() || MBBICommon->mayStore()) - MBBICommon->setMemRefs(MBBICommon->mergeMemRefsWith(*MBBI)); + MBBICommon->cloneMergedMemRefs(*MBB->getParent(), {&*MBBICommon, &*MBBI}); // Drop undef flags if they aren't present in all merged instructions. 
for (unsigned I = 0, E = MBBICommon->getNumOperands(); I != E; ++I) { MachineOperand &MO = MBBICommon->getOperand(I); Index: llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp =================================================================== --- llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ llvm/trunk/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -706,13 +706,12 @@ return; MachinePointerInfo MPInfo(Global); - MachineInstr::mmo_iterator MemRefs = MF->allocateMemRefsArray(1); auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant | MachineMemOperand::MODereferenceable; - *MemRefs = + MachineMemOperand *MemRef = MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8, DL->getPointerABIAlignment(0)); - MIB.setMemRefs(MemRefs, MemRefs + 1); + MIB.setMemRefs({MemRef}); } bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op, Index: llvm/trunk/lib/CodeGen/ImplicitNullChecks.cpp =================================================================== --- llvm/trunk/lib/CodeGen/ImplicitNullChecks.cpp +++ llvm/trunk/lib/CodeGen/ImplicitNullChecks.cpp @@ -651,7 +651,7 @@ } } - MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + MIB.setMemRefs(MI->memoperands()); return MIB; } Index: llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp =================================================================== --- llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp +++ llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp @@ -797,10 +797,7 @@ return true; if (MemOperands.empty()) return false; - MachineInstr::mmo_iterator MemRefs = - MF.allocateMemRefsArray(MemOperands.size()); - std::copy(MemOperands.begin(), MemOperands.end(), MemRefs); - MI->setMemRefs(MemRefs, MemRefs + MemOperands.size()); + MI->setMemRefs(MF, MemOperands); return false; } Index: llvm/trunk/lib/CodeGen/MachineFunction.cpp =================================================================== --- llvm/trunk/lib/CodeGen/MachineFunction.cpp +++ 
llvm/trunk/lib/CodeGen/MachineFunction.cpp @@ -406,77 +406,12 @@ MMO->getOrdering(), MMO->getFailureOrdering()); } -MachineInstr::mmo_iterator -MachineFunction::allocateMemRefsArray(unsigned long Num) { - return Allocator.Allocate(Num); -} - -std::pair -MachineFunction::extractLoadMemRefs(MachineInstr::mmo_iterator Begin, - MachineInstr::mmo_iterator End) { - // Count the number of load mem refs. - unsigned Num = 0; - for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) - if ((*I)->isLoad()) - ++Num; - - // Allocate a new array and populate it with the load information. - MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num); - unsigned Index = 0; - for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) { - if ((*I)->isLoad()) { - if (!(*I)->isStore()) - // Reuse the MMO. - Result[Index] = *I; - else { - // Clone the MMO and unset the store flag. - MachineMemOperand *JustLoad = - getMachineMemOperand((*I)->getPointerInfo(), - (*I)->getFlags() & ~MachineMemOperand::MOStore, - (*I)->getSize(), (*I)->getBaseAlignment(), - (*I)->getAAInfo(), nullptr, - (*I)->getSyncScopeID(), (*I)->getOrdering(), - (*I)->getFailureOrdering()); - Result[Index] = JustLoad; - } - ++Index; - } - } - return std::make_pair(Result, Result + Num); -} - -std::pair -MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin, - MachineInstr::mmo_iterator End) { - // Count the number of load mem refs. - unsigned Num = 0; - for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) - if ((*I)->isStore()) - ++Num; - - // Allocate a new array and populate it with the store information. - MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num); - unsigned Index = 0; - for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) { - if ((*I)->isStore()) { - if (!(*I)->isLoad()) - // Reuse the MMO. - Result[Index] = *I; - else { - // Clone the MMO and unset the load flag. 
- MachineMemOperand *JustStore = - getMachineMemOperand((*I)->getPointerInfo(), - (*I)->getFlags() & ~MachineMemOperand::MOLoad, - (*I)->getSize(), (*I)->getBaseAlignment(), - (*I)->getAAInfo(), nullptr, - (*I)->getSyncScopeID(), (*I)->getOrdering(), - (*I)->getFailureOrdering()); - Result[Index] = JustStore; - } - ++Index; - } - } - return std::make_pair(Result, Result + Num); +MachineInstr::ExtraInfo * +MachineFunction::createMIExtraInfo(ArrayRef MMOs, + MCSymbol *PreInstrSymbol, + MCSymbol *PostInstrSymbol) { + return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol, + PostInstrSymbol); } const char *MachineFunction::createExternalSymbolName(StringRef Name) { Index: llvm/trunk/lib/CodeGen/MachineInstr.cpp =================================================================== --- llvm/trunk/lib/CodeGen/MachineInstr.cpp +++ llvm/trunk/lib/CodeGen/MachineInstr.cpp @@ -131,8 +131,7 @@ /// MachineInstr ctor - Copies MachineInstr arg exactly /// MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI) - : MCID(&MI.getDesc()), NumMemRefs(MI.NumMemRefs), MemRefs(MI.MemRefs), - debugLoc(MI.getDebugLoc()) { + : MCID(&MI.getDesc()), Info(MI.Info), debugLoc(MI.getDebugLoc()) { assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor"); CapOperands = OperandCapacity::get(MI.getNumOperands()); @@ -315,71 +314,178 @@ --NumOperands; } -/// addMemOperand - Add a MachineMemOperand to the machine instruction. -/// This function should be used only occasionally. The setMemRefs function -/// is the primary method for setting up a MachineInstr's MemRefs list. +void MachineInstr::dropMemRefs(MachineFunction &MF) { + if (memoperands_empty()) + return; + + // See if we can just drop all of our extra info. 
+ if (!getPreInstrSymbol() && !getPostInstrSymbol()) { + Info.clear(); + return; + } + if (!getPostInstrSymbol()) { + Info.set(getPreInstrSymbol()); + return; + } + if (!getPreInstrSymbol()) { + Info.set(getPostInstrSymbol()); + return; + } + + // Otherwise allocate a fresh extra info with just these symbols. + Info.set( + MF.createMIExtraInfo({}, getPreInstrSymbol(), getPostInstrSymbol())); +} + +void MachineInstr::setMemRefs(MachineFunction &MF, + ArrayRef MMOs) { + if (MMOs.empty()) { + dropMemRefs(MF); + return; + } + + // Try to store a single MMO inline. + if (MMOs.size() == 1 && !getPreInstrSymbol() && !getPostInstrSymbol()) { + Info.set(MMOs[0]); + return; + } + + // Otherwise create an extra info struct with all of our info. + Info.set( + MF.createMIExtraInfo(MMOs, getPreInstrSymbol(), getPostInstrSymbol())); +} + void MachineInstr::addMemOperand(MachineFunction &MF, MachineMemOperand *MO) { - mmo_iterator OldMemRefs = MemRefs; - unsigned OldNumMemRefs = NumMemRefs; + SmallVector MMOs; + MMOs.append(memoperands_begin(), memoperands_end()); + MMOs.push_back(MO); + setMemRefs(MF, MMOs); +} + +void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) { + if (this == &MI) + // Nothing to do for a self-clone! + return; - unsigned NewNum = NumMemRefs + 1; - mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NewNum); + assert(&MF == MI.getMF() && + "Invalid machine functions when cloning memory refrences!"); + // See if we can just steal the extra info already allocated for the + // instruction. We can do this whenever the pre- and post-instruction symbols + // are the same (including null). + if (getPreInstrSymbol() == MI.getPreInstrSymbol() && + getPostInstrSymbol() == MI.getPostInstrSymbol()) { + Info = MI.Info; + return; + } - std::copy(OldMemRefs, OldMemRefs + OldNumMemRefs, NewMemRefs); - NewMemRefs[NewNum - 1] = MO; - setMemRefs(NewMemRefs, NewMemRefs + NewNum); + // Otherwise, fall back on a copy-based clone. 
+ setMemRefs(MF, MI.memoperands()); } /// Check to see if the MMOs pointed to by the two MemRefs arrays are /// identical. -static bool hasIdenticalMMOs(const MachineInstr &MI1, const MachineInstr &MI2) { - auto I1 = MI1.memoperands_begin(), E1 = MI1.memoperands_end(); - auto I2 = MI2.memoperands_begin(), E2 = MI2.memoperands_end(); - if ((E1 - I1) != (E2 - I2)) +static bool hasIdenticalMMOs(ArrayRef LHS, + ArrayRef RHS) { + if (LHS.size() != RHS.size()) return false; - for (; I1 != E1; ++I1, ++I2) { - if (**I1 != **I2) - return false; + + auto LHSPointees = make_pointee_range(LHS); + auto RHSPointees = make_pointee_range(RHS); + return std::equal(LHSPointees.begin(), LHSPointees.end(), + RHSPointees.begin()); +} + +void MachineInstr::cloneMergedMemRefs(MachineFunction &MF, + ArrayRef MIs) { + // Try handling easy numbers of MIs with simpler mechanisms. + if (MIs.empty()) { + dropMemRefs(MF); + return; } - return true; + if (MIs.size() == 1) { + cloneMemRefs(MF, *MIs[0]); + return; + } + // Because an empty memoperands list provides *no* information and must be + // handled conservatively (assuming the instruction can do anything), the only + // way to merge with it is to drop all other memoperands. + if (MIs[0]->memoperands_empty()) { + dropMemRefs(MF); + return; + } + + // Handle the general case. + SmallVector MergedMMOs; + // Start with the first instruction. + assert(&MF == MIs[0]->getMF() && + "Invalid machine functions when cloning memory references!"); + MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end()); + // Now walk all the other instructions and accumulate any different MMOs. + for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) { + assert(&MF == MI.getMF() && + "Invalid machine functions when cloning memory references!"); + + // Skip MIs with identical operands to the first. This is a somewhat + // arbitrary hack but will catch common cases without being quadratic. 
+ // TODO: We could fully implement merge semantics here if needed. + if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands())) + continue; + + // Because an empty memoperands list provides *no* information and must be + // handled conservatively (assuming the instruction can do anything), the + // only way to merge with it is to drop all other memoperands. + if (MI.memoperands_empty()) { + dropMemRefs(MF); + return; + } + + // Otherwise accumulate these into our temporary buffer of the merged state. + MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end()); + } + + setMemRefs(MF, MergedMMOs); } -std::pair -MachineInstr::mergeMemRefsWith(const MachineInstr& Other) { +MCSymbol *MachineInstr::getOrCreatePreInstrTempSymbol(MCContext &MCCtx) { + MCSymbol *S = getPreInstrSymbol(); + if (S) + return S; + + // Create a new temp symbol. + S = MCCtx.createTempSymbol(); + + if (!Info) { + // If we don't have any other extra info, we can store this inline. + Info.set(S); + return S; + } - // If either of the incoming memrefs are empty, we must be conservative and - // treat this as if we've exhausted our space for memrefs and dropped them. - if (memoperands_empty() || Other.memoperands_empty()) - return std::make_pair(nullptr, 0); - - // If both instructions have identical memrefs, we don't need to merge them. - // Since many instructions have a single memref, and we tend to merge things - // like pairs of loads from the same location, this catches a large number of - // cases in practice. - if (hasIdenticalMMOs(*this, Other)) - return std::make_pair(MemRefs, NumMemRefs); - - // TODO: consider uniquing elements within the operand lists to reduce - // space usage and fall back to conservative information less often. - size_t CombinedNumMemRefs = NumMemRefs + Other.NumMemRefs; - - // If we don't have enough room to store this many memrefs, be conservative - // and drop them. Otherwise, we'd fail asserts when trying to add them to - // the new instruction. 
- if (CombinedNumMemRefs != uint8_t(CombinedNumMemRefs)) - return std::make_pair(nullptr, 0); - - MachineFunction *MF = getMF(); - mmo_iterator MemBegin = MF->allocateMemRefsArray(CombinedNumMemRefs); - mmo_iterator MemEnd = std::copy(memoperands_begin(), memoperands_end(), - MemBegin); - MemEnd = std::copy(Other.memoperands_begin(), Other.memoperands_end(), - MemEnd); - assert(MemEnd - MemBegin == (ptrdiff_t)CombinedNumMemRefs && - "missing memrefs"); + // Otherwise, allocate a full set of extra info. + Info.set( + getMF()->createMIExtraInfo(memoperands(), S, getPostInstrSymbol())); + + return S; +} + +MCSymbol *MachineInstr::getOrCreatePostInstrTempSymbol(MCContext &MCCtx) { + MCSymbol *S = getPostInstrSymbol(); + if (S) + return S; + + // Create a new temp symbol. + S = MCCtx.createTempSymbol(); + + if (!Info) { + // If we don't have any other extra info, we can store this inline. + Info.set(S); + return S; + } - return std::make_pair(MemBegin, CombinedNumMemRefs); + // Otherwise, allocate a full set of extra info. + Info.set( + getMF()->createMIExtraInfo(memoperands(), getPreInstrSymbol(), S)); + return S; } uint16_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const { Index: llvm/trunk/lib/CodeGen/MachineOutliner.cpp =================================================================== --- llvm/trunk/lib/CodeGen/MachineOutliner.cpp +++ llvm/trunk/lib/CodeGen/MachineOutliner.cpp @@ -1197,7 +1197,7 @@ for (unsigned Str : OF.Sequence) { MachineInstr *NewMI = MF.CloneMachineInstr(Mapper.IntegerInstructionMap.find(Str)->second); - NewMI->dropMemRefs(); + NewMI->dropMemRefs(MF); // Don't keep debug information for outlined instructions. 
NewMI->setDebugLoc(DebugLoc()); Index: llvm/trunk/lib/CodeGen/MachinePipeliner.cpp =================================================================== --- llvm/trunk/lib/CodeGen/MachinePipeliner.cpp +++ llvm/trunk/lib/CodeGen/MachinePipeliner.cpp @@ -3175,28 +3175,26 @@ return; // If the instruction has memory operands, then adjust the offset // when the instruction appears in different stages. - unsigned NumRefs = NewMI.memoperands_end() - NewMI.memoperands_begin(); - if (NumRefs == 0) + if (NewMI.memoperands_empty()) return; - MachineInstr::mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NumRefs); - unsigned Refs = 0; + SmallVector NewMMOs; for (MachineMemOperand *MMO : NewMI.memoperands()) { if (MMO->isVolatile() || (MMO->isInvariant() && MMO->isDereferenceable()) || (!MMO->getValue())) { - NewMemRefs[Refs++] = MMO; + NewMMOs.push_back(MMO); continue; } unsigned Delta; if (Num != UINT_MAX && computeDelta(OldMI, Delta)) { int64_t AdjOffset = Delta * Num; - NewMemRefs[Refs++] = - MF.getMachineMemOperand(MMO, AdjOffset, MMO->getSize()); + NewMMOs.push_back( + MF.getMachineMemOperand(MMO, AdjOffset, MMO->getSize())); } else { - NewMI.dropMemRefs(); + NewMI.dropMemRefs(MF); return; } } - NewMI.setMemRefs(NewMemRefs, NewMemRefs + NumRefs); + NewMI.setMemRefs(MF, NewMMOs); } /// Clone the instruction for the new pipelined loop and update the Index: llvm/trunk/lib/CodeGen/SelectionDAG/InstrEmitter.cpp =================================================================== --- llvm/trunk/lib/CodeGen/SelectionDAG/InstrEmitter.cpp +++ llvm/trunk/lib/CodeGen/SelectionDAG/InstrEmitter.cpp @@ -886,12 +886,9 @@ MIB.addReg(ScratchRegs[i], RegState::ImplicitDefine | RegState::EarlyClobber); - // Transfer all of the memory reference descriptions of this instruction. 
- ArrayRef SDNodeMemRefs = - cast(Node)->memoperands(); - MachineMemOperand **MemRefs = MF->allocateMemRefsArray(SDNodeMemRefs.size()); - std::copy(SDNodeMemRefs.begin(), SDNodeMemRefs.end(), MemRefs); - MIB.setMemRefs({MemRefs, SDNodeMemRefs.size()}); + // Set the memory reference descriptions of this instruction now that it is + // part of the function. + MIB.setMemRefs(cast(Node)->memoperands()); // Insert the instruction into position in the block. This needs to // happen before any custom inserter hook is called so that the Index: llvm/trunk/lib/CodeGen/StackColoring.cpp =================================================================== --- llvm/trunk/lib/CodeGen/StackColoring.cpp +++ llvm/trunk/lib/CodeGen/StackColoring.cpp @@ -1022,9 +1022,7 @@ } // We adjust AliasAnalysis information for merged stack slots. - MachineInstr::mmo_iterator NewMemOps = - MF->allocateMemRefsArray(I.getNumMemOperands()); - unsigned MemOpIdx = 0; + SmallVector NewMMOs; bool ReplaceMemOps = false; for (MachineMemOperand *MMO : I.memoperands()) { // If this memory location can be a slot remapped here, @@ -1051,17 +1049,17 @@ } } if (MayHaveConflictingAAMD) { - NewMemOps[MemOpIdx++] = MF->getMachineMemOperand(MMO, AAMDNodes()); + NewMMOs.push_back(MF->getMachineMemOperand(MMO, AAMDNodes())); ReplaceMemOps = true; + } else { + NewMMOs.push_back(MMO); } - else - NewMemOps[MemOpIdx++] = MMO; } // If any memory operand is updated, set memory references of // this instruction. if (ReplaceMemOps) - I.setMemRefs(std::make_pair(NewMemOps, I.getNumMemOperands())); + I.setMemRefs(*MF, NewMMOs); } // Update the location of C++ catch objects for the MSVC personality routine. 
Index: llvm/trunk/lib/CodeGen/TargetInstrInfo.cpp =================================================================== --- llvm/trunk/lib/CodeGen/TargetInstrInfo.cpp +++ llvm/trunk/lib/CodeGen/TargetInstrInfo.cpp @@ -583,7 +583,7 @@ } if (NewMI) { - NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + NewMI->setMemRefs(MF, MI.memoperands()); // Add a memory operand, foldMemoryOperandImpl doesn't do that. assert((!(Flags & MachineMemOperand::MOStore) || NewMI->mayStore()) && @@ -653,10 +653,10 @@ // Copy the memoperands from the load to the folded instruction. if (MI.memoperands_empty()) { - NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end()); + NewMI->setMemRefs(MF, LoadMI.memoperands()); } else { // Handle the rare case of folding multiple loads. - NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + NewMI->setMemRefs(MF, MI.memoperands()); for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(), E = LoadMI.memoperands_end(); I != E; ++I) { Index: llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp =================================================================== --- llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp +++ llvm/trunk/lib/CodeGen/TargetLoweringBase.cpp @@ -968,7 +968,7 @@ MIB.add(MI->getOperand(i)); // Inherit previous memory operands. - MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + MIB.cloneMemRefs(*MI); assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!"); // Add a new memory operand for this FI. 
Index: llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp +++ llvm/trunk/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -479,7 +479,7 @@ MIB.addImm(CSStackSizeIncImm); MIB.setMIFlags(MBBI->getFlags()); - MIB.setMemRefs(MBBI->memoperands_begin(), MBBI->memoperands_end()); + MIB.setMemRefs(MBBI->memoperands()); return std::prev(MBB.erase(MBBI)); } Index: llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp =================================================================== --- llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp +++ llvm/trunk/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp @@ -702,7 +702,7 @@ .addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR) .add(BaseRegOp) .addImm(OffsetImm) - .setMemRefs(I->mergeMemRefsWith(*MergeMI)) + .cloneMergedMemRefs({&*I, &*MergeMI}) .setMIFlags(I->mergeFlagsWith(*MergeMI)); (void)MIB; @@ -819,7 +819,7 @@ .add(RegOp1) .add(BaseRegOp) .addImm(OffsetImm) - .setMemRefs(I->mergeMemRefsWith(*Paired)) + .cloneMergedMemRefs({&*I, &*Paired}) .setMIFlags(I->mergeFlagsWith(*Paired)); (void)MIB; @@ -1338,7 +1338,7 @@ .add(getLdStRegOp(*I)) .add(getLdStBaseOp(*I)) .addImm(Value) - .setMemRefs(I->memoperands_begin(), I->memoperands_end()) + .setMemRefs(I->memoperands()) .setMIFlags(I->mergeFlagsWith(*Update)); } else { // Paired instruction. 
@@ -1349,7 +1349,7 @@ .add(getLdStRegOp(*I, 1)) .add(getLdStBaseOp(*I)) .addImm(Value / Scale) - .setMemRefs(I->memoperands_begin(), I->memoperands_end()) + .setMemRefs(I->memoperands()) .setMIFlags(I->mergeFlagsWith(*Update)); } (void)MIB; Index: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp +++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp @@ -3419,7 +3419,7 @@ for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) MIB.add(MI.getOperand(I)); - MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIB.cloneMemRefs(MI); MI.eraseFromParent(); return BB; } Index: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp +++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -3735,7 +3735,7 @@ MIB.addImm(TFE->getImm()); } - MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIB.cloneMemRefs(MI); Addr64 = MIB; } else { // Atomics with return. 
@@ -3749,7 +3749,7 @@ .add(*SOffset) .add(*Offset) .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)) - .setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + .cloneMemRefs(MI); } MI.removeFromParent(); @@ -3955,17 +3955,17 @@ } MachineInstr *NewInstr = - BuildMI(*MBB, Inst, Inst.getDebugLoc(), - get(AMDGPU::BUFFER_LOAD_DWORD_OFFEN), VDst) - .add(*VAddr) // vaddr - .add(*getNamedOperand(Inst, AMDGPU::OpName::sbase)) // srsrc - .addImm(0) // soffset - .addImm(Offset) // offset - .addImm(getNamedOperand(Inst, AMDGPU::OpName::glc)->getImm()) - .addImm(0) // slc - .addImm(0) // tfe - .setMemRefs(Inst.memoperands_begin(), Inst.memoperands_end()) - .getInstr(); + BuildMI(*MBB, Inst, Inst.getDebugLoc(), + get(AMDGPU::BUFFER_LOAD_DWORD_OFFEN), VDst) + .add(*VAddr) // vaddr + .add(*getNamedOperand(Inst, AMDGPU::OpName::sbase)) // srsrc + .addImm(0) // soffset + .addImm(Offset) // offset + .addImm(getNamedOperand(Inst, AMDGPU::OpName::glc)->getImm()) + .addImm(0) // slc + .addImm(0) // tfe + .cloneMemRefs(Inst) + .getInstr(); MRI.replaceRegWith(getNamedOperand(Inst, AMDGPU::OpName::sdst)->getReg(), VDst); Index: llvm/trunk/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp +++ llvm/trunk/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp @@ -528,13 +528,12 @@ .addReg(AddrReg->getReg()); } - MachineInstrBuilder Read2 = - BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg) - .addReg(BaseReg, BaseRegFlags) // addr - .addImm(NewOffset0) // offset0 - .addImm(NewOffset1) // offset1 - .addImm(0) // gds - .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired)); + MachineInstrBuilder Read2 = BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg) + .addReg(BaseReg, BaseRegFlags) // addr + .addImm(NewOffset0) // offset0 + .addImm(NewOffset1) // offset1 + .addImm(0) // gds + .cloneMergedMemRefs({&*CI.I, &*CI.Paired}); (void)Read2; @@ -616,15 +615,14 @@ .addReg(AddrReg->getReg()); } - 
MachineInstrBuilder Write2 = - BuildMI(*MBB, CI.Paired, DL, Write2Desc) - .addReg(BaseReg, BaseRegFlags) // addr - .add(*Data0) // data0 - .add(*Data1) // data1 - .addImm(NewOffset0) // offset0 - .addImm(NewOffset1) // offset1 - .addImm(0) // gds - .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired)); + MachineInstrBuilder Write2 = BuildMI(*MBB, CI.Paired, DL, Write2Desc) + .addReg(BaseReg, BaseRegFlags) // addr + .add(*Data0) // data0 + .add(*Data1) // data1 + .addImm(NewOffset0) // offset0 + .addImm(NewOffset1) // offset1 + .addImm(0) // gds + .cloneMergedMemRefs({&*CI.I, &*CI.Paired}); moveInstsAfter(Write2, CI.InstsToMove); @@ -652,7 +650,7 @@ .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase)) .addImm(MergedOffset) // offset .addImm(CI.GLC0) // glc - .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired)); + .cloneMergedMemRefs({&*CI.I, &*CI.Paired}); unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0; unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1; @@ -711,7 +709,7 @@ .addImm(CI.GLC0) // glc .addImm(CI.SLC0) // slc .addImm(0) // tfe - .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired)); + .cloneMergedMemRefs({&*CI.I, &*CI.Paired}); unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0; unsigned SubRegIdx1 = CI.IsX2 ? 
AMDGPU::sub2_sub3 : AMDGPU::sub1; @@ -811,10 +809,10 @@ MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc)) .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset)) .addImm(std::min(CI.Offset0, CI.Offset1)) // offset - .addImm(CI.GLC0) // glc - .addImm(CI.SLC0) // slc - .addImm(0) // tfe - .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired)); + .addImm(CI.GLC0) // glc + .addImm(CI.SLC0) // slc + .addImm(0) // tfe + .cloneMergedMemRefs({&*CI.I, &*CI.Paired}); moveInstsAfter(MIB, CI.InstsToMove); Index: llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp +++ llvm/trunk/lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -495,15 +495,16 @@ return false; const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata); - MachineInstrBuilder NewMI = BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp)) - .add(*Reg) - .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)) - .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)) - .addImm(Offset) - .addImm(0) // glc - .addImm(0) // slc - .addImm(0) // tfe - .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + MachineInstrBuilder NewMI = + BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp)) + .add(*Reg) + .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)) + .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)) + .addImm(Offset) + .addImm(0) // glc + .addImm(0) // slc + .addImm(0) // tfe + .cloneMemRefs(*MI); const MachineOperand *VDataIn = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata_in); Index: llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp =================================================================== --- llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -1590,11 +1590,10 @@ MachineFunction &MF = *MBB.getParent(); unsigned CPI = Orig.getOperand(1).getIndex(); unsigned PCLabelId = duplicateCPV(MF, CPI); - MachineInstrBuilder MIB = - 
BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg) - .addConstantPoolIndex(CPI) - .addImm(PCLabelId); - MIB->setMemRefs(Orig.memoperands_begin(), Orig.memoperands_end()); + BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg) + .addConstantPoolIndex(CPI) + .addImm(PCLabelId) + .cloneMemRefs(Orig); break; } } @@ -4534,9 +4533,9 @@ MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg); MIB.addReg(Reg, RegState::Kill) - .addImm(0) - .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()) - .add(predOps(ARMCC::AL)); + .addImm(0) + .cloneMemRefs(*MI) + .add(predOps(ARMCC::AL)); } bool Index: llvm/trunk/lib/Target/ARM/ARMExpandPseudoInsts.cpp =================================================================== --- llvm/trunk/lib/Target/ARM/ARMExpandPseudoInsts.cpp +++ llvm/trunk/lib/Target/ARM/ARMExpandPseudoInsts.cpp @@ -570,7 +570,7 @@ TransferImpOps(MI, MIB, MIB); // Transfer memoperands. - MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIB.cloneMemRefs(MI); MI.eraseFromParent(); } @@ -645,7 +645,7 @@ TransferImpOps(MI, MIB, MIB); // Transfer memoperands. - MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIB.cloneMemRefs(MI); MI.eraseFromParent(); } @@ -735,7 +735,7 @@ MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead)); TransferImpOps(MI, MIB, MIB); // Transfer memoperands. 
- MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIB.cloneMemRefs(MI); MI.eraseFromParent(); } @@ -848,8 +848,8 @@ unsigned SOImmValV2 = ARM_AM::getSOImmTwoPartSecond(ImmVal); LO16 = LO16.addImm(SOImmValV1); HI16 = HI16.addImm(SOImmValV2); - LO16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - HI16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + LO16.cloneMemRefs(MI); + HI16.cloneMemRefs(MI); LO16.addImm(Pred).addReg(PredReg).add(condCodeOp()); HI16.addImm(Pred).addReg(PredReg).add(condCodeOp()); if (isCC) @@ -899,8 +899,8 @@ } } - LO16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - HI16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + LO16.cloneMemRefs(MI); + HI16.cloneMemRefs(MI); LO16.addImm(Pred).addReg(PredReg); HI16.addImm(Pred).addReg(PredReg); @@ -1425,7 +1425,7 @@ MIB.addExternalSymbol("__aeabi_read_tp", 0); } - MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIB.cloneMemRefs(MI); TransferImpOps(MI, MIB, MIB); MI.eraseFromParent(); return true; @@ -1440,7 +1440,7 @@ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewLdOpc), DstReg) .add(MI.getOperand(1)) .add(predOps(ARMCC::AL)); - MIB1->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIB1.cloneMemRefs(MI); MachineInstrBuilder MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tPICADD)) .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead)) @@ -1544,7 +1544,7 @@ if (isARM) { MIB3.add(predOps(ARMCC::AL)); if (Opcode == ARM::MOV_ga_pcrel_ldr) - MIB3->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIB3.cloneMemRefs(MI); } TransferImpOps(MI, MIB1, MIB3); MI.eraseFromParent(); @@ -1596,7 +1596,7 @@ // Add an implicit def for the super-register. 
MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead)); TransferImpOps(MI, MIB, MIB); - MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIB.cloneMemRefs(MI); MI.eraseFromParent(); return true; } @@ -1629,7 +1629,7 @@ MIB->addRegisterKilled(SrcReg, TRI, true); TransferImpOps(MI, MIB, MIB); - MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIB.cloneMemRefs(MI); MI.eraseFromParent(); return true; } Index: llvm/trunk/lib/Target/ARM/ARMInstrInfo.cpp =================================================================== --- llvm/trunk/lib/Target/ARM/ARMInstrInfo.cpp +++ llvm/trunk/lib/Target/ARM/ARMInstrInfo.cpp @@ -132,7 +132,7 @@ BuildMI(MBB, MI, DL, get(ARM::LDRi12), Reg) .addReg(Reg, RegState::Kill) .addImm(0) - .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()) + .cloneMemRefs(*MI) .add(predOps(ARMCC::AL)); } Index: llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp =================================================================== --- llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ llvm/trunk/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -1303,7 +1303,7 @@ MIB.add(MI->getOperand(OpNum)); // Transfer memoperands. - MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + MIB.setMemRefs(MI->memoperands()); MBB.erase(MBBI); return true; @@ -1527,7 +1527,7 @@ // Transfer implicit operands. 
for (const MachineOperand &MO : MI.implicit_operands()) MIB.add(MO); - MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIB.setMemRefs(MI.memoperands()); MBB.erase(MBBI); return true; @@ -2290,7 +2290,7 @@ if (!isT2) MIB.addReg(0); MIB.addImm(Offset).addImm(Pred).addReg(PredReg); - MIB.setMemRefs(Op0->mergeMemRefsWith(*Op1)); + MIB.cloneMergedMemRefs({Op0, Op1}); LLVM_DEBUG(dbgs() << "Formed " << *MIB << "\n"); ++NumLDRDFormed; } else { @@ -2304,7 +2304,7 @@ if (!isT2) MIB.addReg(0); MIB.addImm(Offset).addImm(Pred).addReg(PredReg); - MIB.setMemRefs(Op0->mergeMemRefsWith(*Op1)); + MIB.cloneMergedMemRefs({Op0, Op1}); LLVM_DEBUG(dbgs() << "Formed " << *MIB << "\n"); ++NumSTRDFormed; } Index: llvm/trunk/lib/Target/ARM/Thumb2SizeReduction.cpp =================================================================== --- llvm/trunk/lib/Target/ARM/Thumb2SizeReduction.cpp +++ llvm/trunk/lib/Target/ARM/Thumb2SizeReduction.cpp @@ -485,7 +485,7 @@ .addReg(Rt, IsStore ? 0 : RegState::Define); // Transfer memoperands. - MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + MIB.setMemRefs(MI->memoperands()); // Transfer MI flags. MIB.setMIFlags(MI->getFlags()); @@ -605,7 +605,7 @@ MIB.add(MI->getOperand(OpNum)); // Transfer memoperands. - MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + MIB.setMemRefs(MI->memoperands()); // Transfer MI flags. 
MIB.setMIFlags(MI->getFlags()); Index: llvm/trunk/lib/Target/AVR/AVRExpandPseudoInsts.cpp =================================================================== --- llvm/trunk/lib/Target/AVR/AVRExpandPseudoInsts.cpp +++ llvm/trunk/lib/Target/AVR/AVRExpandPseudoInsts.cpp @@ -568,8 +568,8 @@ llvm_unreachable("Unknown operand type!"); } - MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); MI.eraseFromParent(); return true; @@ -617,8 +617,8 @@ buildMI(MBB, MBBI, AVR::POPRd).addReg(DstLoReg); } - MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); MI.eraseFromParent(); return true; @@ -648,8 +648,8 @@ .addReg(SrcReg, RegState::Define | getDeadRegState(SrcIsDead)) .addReg(SrcReg, RegState::Kill); - MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); MI.eraseFromParent(); return true; @@ -679,8 +679,8 @@ .addReg(SrcReg, RegState::Define | getDeadRegState(SrcIsDead)) .addReg(SrcReg, RegState::Kill); - MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); MI.eraseFromParent(); return true; @@ -734,8 +734,8 @@ buildMI(MBB, MBBI, AVR::POPRd).addReg(DstLoReg); } - MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); MI.eraseFromParent(); return true; @@ -782,8 +782,8 @@ buildMI(MBB, MBBI, 
AVR::POPRd).addReg(DstLoReg); } - MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); MI.eraseFromParent(); return true; @@ -1003,8 +1003,8 @@ MIBLO.addReg(SrcLoReg, getKillRegState(SrcIsKill)); MIBHI.addReg(SrcHiReg, getKillRegState(SrcIsKill)); - MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); MI.eraseFromParent(); return true; @@ -1031,8 +1031,8 @@ .addImm(1) .addReg(SrcHiReg, getKillRegState(SrcIsKill)); - MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); MI.eraseFromParent(); return true; @@ -1065,8 +1065,8 @@ .addReg(SrcHiReg, getKillRegState(SrcIsKill)) .addImm(Imm); - MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); MI.eraseFromParent(); return true; @@ -1099,8 +1099,8 @@ .addReg(SrcLoReg, getKillRegState(SrcIsKill)) .addImm(Imm); - MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); MI.eraseFromParent(); return true; @@ -1133,8 +1133,8 @@ .addImm(Imm + 1) .addReg(SrcHiReg, getKillRegState(SrcIsKill)); - MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); MI.eraseFromParent(); return true; @@ -1163,8 +1163,8 @@ .addReg(DstHiReg, 
RegState::Define | getDeadRegState(DstIsDead)) .addImm(Imm + 1); - MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); MI.eraseFromParent(); return true; @@ -1194,8 +1194,8 @@ .addImm(Imm) .addReg(SrcLoReg, getKillRegState(SrcIsKill)); - MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIBLO.setMemRefs(MI.memoperands()); + MIBHI.setMemRefs(MI.memoperands()); MI.eraseFromParent(); return true; Index: llvm/trunk/lib/Target/Hexagon/HexagonConstExtenders.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonConstExtenders.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonConstExtenders.cpp @@ -1629,7 +1629,7 @@ else MIB.add(MachineOperand(ExtR)); } - MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIB.cloneMemRefs(MI); MBB.erase(MI); return true; } @@ -1680,7 +1680,7 @@ // Add the stored value for stores. if (MI.mayStore()) MIB.add(getStoredValueOp(MI)); - MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIB.cloneMemRefs(MI); MBB.erase(MI); return true; } @@ -1797,7 +1797,7 @@ // Add the stored value for stores. if (MI.mayStore()) MIB.add(getStoredValueOp(MI)); - MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIB.cloneMemRefs(MI); MBB.erase(MI); return true; } Index: llvm/trunk/lib/Target/Hexagon/HexagonEarlyIfConv.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonEarlyIfConv.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonEarlyIfConv.cpp @@ -731,9 +731,7 @@ MIB.add(MO); // Set memory references. 
- MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); - MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); - MIB.setMemRefs(MMOBegin, MMOEnd); + MIB.cloneMemRefs(*MI); MI->eraseFromParent(); return; Index: llvm/trunk/lib/Target/Hexagon/HexagonExpandCondsets.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonExpandCondsets.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonExpandCondsets.cpp @@ -891,14 +891,7 @@ MB.add(MO); Ox++; } - - MachineFunction &MF = *B.getParent(); - MachineInstr::mmo_iterator I = MI.memoperands_begin(); - unsigned NR = std::distance(I, MI.memoperands_end()); - MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(NR); - for (unsigned i = 0; i < NR; ++i) - MemRefs[i] = *I++; - MB.setMemRefs(MemRefs, MemRefs+NR); + MB.cloneMemRefs(MI); MachineInstr *NewI = MB; NewI->clearKillInfo(); Index: llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -1579,10 +1579,10 @@ // S2_storeri_io FI, 0, TmpR BuildMI(B, It, DL, HII.get(Hexagon::S2_storeri_io)) - .addFrameIndex(FI) - .addImm(0) - .addReg(TmpR, RegState::Kill) - .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + .addFrameIndex(FI) + .addImm(0) + .addReg(TmpR, RegState::Kill) + .cloneMemRefs(*MI); NewRegs.push_back(TmpR); B.erase(It); @@ -1604,9 +1604,9 @@ // TmpR = L2_loadri_io FI, 0 unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); BuildMI(B, It, DL, HII.get(Hexagon::L2_loadri_io), TmpR) - .addFrameIndex(FI) - .addImm(0) - .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + .addFrameIndex(FI) + .addImm(0) + .cloneMemRefs(*MI); // DstR = C2_tfrrp TmpR if DstR is a predicate register // DstR = A2_tfrrcr TmpR if DstR is a modifier register @@ -1731,10 +1731,10 @@ StoreOpc = NeedAlign <= 
HasAlign ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32Ub_ai; BuildMI(B, It, DL, HII.get(StoreOpc)) - .addFrameIndex(FI) - .addImm(0) - .addReg(SrcLo, getKillRegState(IsKill)) - .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + .addFrameIndex(FI) + .addImm(0) + .addReg(SrcLo, getKillRegState(IsKill)) + .cloneMemRefs(*MI); } // Store high part. @@ -1742,10 +1742,10 @@ StoreOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32Ub_ai; BuildMI(B, It, DL, HII.get(StoreOpc)) - .addFrameIndex(FI) - .addImm(Size) - .addReg(SrcHi, getKillRegState(IsKill)) - .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + .addFrameIndex(FI) + .addImm(Size) + .addReg(SrcHi, getKillRegState(IsKill)) + .cloneMemRefs(*MI); } B.erase(It); @@ -1777,17 +1777,17 @@ LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai : Hexagon::V6_vL32Ub_ai; BuildMI(B, It, DL, HII.get(LoadOpc), DstLo) - .addFrameIndex(FI) - .addImm(0) - .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + .addFrameIndex(FI) + .addImm(0) + .cloneMemRefs(*MI); // Load high part. LoadOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vL32b_ai : Hexagon::V6_vL32Ub_ai; BuildMI(B, It, DL, HII.get(LoadOpc), DstHi) - .addFrameIndex(FI) - .addImm(Size) - .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + .addFrameIndex(FI) + .addImm(Size) + .cloneMemRefs(*MI); B.erase(It); return true; @@ -1813,10 +1813,10 @@ unsigned StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32Ub_ai; BuildMI(B, It, DL, HII.get(StoreOpc)) - .addFrameIndex(FI) - .addImm(0) - .addReg(SrcR, getKillRegState(IsKill)) - .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + .addFrameIndex(FI) + .addImm(0) + .addReg(SrcR, getKillRegState(IsKill)) + .cloneMemRefs(*MI); B.erase(It); return true; @@ -1841,9 +1841,9 @@ unsigned LoadOpc = NeedAlign <= HasAlign ? 
Hexagon::V6_vL32b_ai : Hexagon::V6_vL32Ub_ai; BuildMI(B, It, DL, HII.get(LoadOpc), DstR) - .addFrameIndex(FI) - .addImm(0) - .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + .addFrameIndex(FI) + .addImm(0) + .cloneMemRefs(*MI); B.erase(It); return true; Index: llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.cpp =================================================================== --- llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ llvm/trunk/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -1086,19 +1086,18 @@ unsigned NewOpc = Aligned ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32Ub_ai; unsigned Offset = HRI.getSpillSize(Hexagon::HvxVRRegClass); - MachineInstr *MI1New = - BuildMI(MBB, MI, DL, get(NewOpc)) - .add(MI.getOperand(0)) - .addImm(MI.getOperand(1).getImm()) - .addReg(SrcSubLo) - .setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc)) + .add(MI.getOperand(0)) + .addImm(MI.getOperand(1).getImm()) + .addReg(SrcSubLo) + .cloneMemRefs(MI); MI1New->getOperand(0).setIsKill(false); BuildMI(MBB, MI, DL, get(NewOpc)) .add(MI.getOperand(0)) // The Vectors are indexed in multiples of vector size. .addImm(MI.getOperand(1).getImm() + Offset) .addReg(SrcSubHi) - .setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + .cloneMemRefs(MI); MBB.erase(MI); return true; } @@ -1111,15 +1110,15 @@ MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc), HRI.getSubReg(DstReg, Hexagon::vsub_lo)) - .add(MI.getOperand(1)) - .addImm(MI.getOperand(2).getImm()) - .setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + .add(MI.getOperand(1)) + .addImm(MI.getOperand(2).getImm()) + .cloneMemRefs(MI); MI1New->getOperand(1).setIsKill(false); BuildMI(MBB, MI, DL, get(NewOpc), HRI.getSubReg(DstReg, Hexagon::vsub_hi)) .add(MI.getOperand(1)) // The Vectors are indexed in multiples of vector size. 
.addImm(MI.getOperand(2).getImm() + Offset) - .setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + .cloneMemRefs(MI); MBB.erase(MI); return true; } Index: llvm/trunk/lib/Target/Lanai/LanaiMemAluCombiner.cpp =================================================================== --- llvm/trunk/lib/Target/Lanai/LanaiMemAluCombiner.cpp +++ llvm/trunk/lib/Target/Lanai/LanaiMemAluCombiner.cpp @@ -277,8 +277,7 @@ InstrBuilder.addImm(LPAC::makePostOp(AluOpcode)); // Transfer memory operands. - InstrBuilder->setMemRefs(MemInstr->memoperands_begin(), - MemInstr->memoperands_end()); + InstrBuilder.setMemRefs(MemInstr->memoperands()); } // Function determines if ALU operation (in alu_iter) can be combined with Index: llvm/trunk/lib/Target/Mips/MipsInstrInfo.cpp =================================================================== --- llvm/trunk/lib/Target/Mips/MipsInstrInfo.cpp +++ llvm/trunk/lib/Target/Mips/MipsInstrInfo.cpp @@ -663,8 +663,7 @@ } MIB.copyImplicitOps(*I); - - MIB.setMemRefs(I->memoperands_begin(), I->memoperands_end()); + MIB.cloneMemRefs(*I); return MIB; } Index: llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp =================================================================== --- llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp +++ llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp @@ -9964,10 +9964,6 @@ const BasicBlock *BB = MBB->getBasicBlock(); MachineFunction::iterator I = ++MBB->getIterator(); - // Memory Reference - MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); - MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); - unsigned DstReg = MI.getOperand(0).getReg(); const TargetRegisterClass *RC = MRI.getRegClass(DstReg); assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!"); @@ -10030,10 +10026,10 @@ if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) { setUsesTOCBasePtr(*MBB->getParent()); MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD)) - .addReg(PPC::X2) - .addImm(TOCOffset) - .addReg(BufReg); - 
MIB.setMemRefs(MMOBegin, MMOEnd); + .addReg(PPC::X2) + .addImm(TOCOffset) + .addReg(BufReg) + .cloneMemRefs(MI); } // Naked functions never have a base pointer, and so we use r1. For all @@ -10048,8 +10044,8 @@ TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW)) .addReg(BaseReg) .addImm(BPOffset) - .addReg(BufReg); - MIB.setMemRefs(MMOBegin, MMOEnd); + .addReg(BufReg) + .cloneMemRefs(MI); // Setup MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB); @@ -10082,8 +10078,7 @@ .addImm(LabelOffset) .addReg(BufReg); } - - MIB.setMemRefs(MMOBegin, MMOEnd); + MIB.cloneMemRefs(MI); BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); mainMBB->addSuccessor(sinkMBB); @@ -10107,10 +10102,6 @@ MachineFunction *MF = MBB->getParent(); MachineRegisterInfo &MRI = MF->getRegInfo(); - // Memory Reference - MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); - MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); - MVT PVT = getPointerTy(MF->getDataLayout()); assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!"); @@ -10148,7 +10139,7 @@ .addImm(0) .addReg(BufReg); } - MIB.setMemRefs(MMOBegin, MMOEnd); + MIB.cloneMemRefs(MI); // Reload IP if (PVT == MVT::i64) { @@ -10160,7 +10151,7 @@ .addImm(LabelOffset) .addReg(BufReg); } - MIB.setMemRefs(MMOBegin, MMOEnd); + MIB.cloneMemRefs(MI); // Reload SP if (PVT == MVT::i64) { @@ -10172,7 +10163,7 @@ .addImm(SPOffset) .addReg(BufReg); } - MIB.setMemRefs(MMOBegin, MMOEnd); + MIB.cloneMemRefs(MI); // Reload BP if (PVT == MVT::i64) { @@ -10184,16 +10175,15 @@ .addImm(BPOffset) .addReg(BufReg); } - MIB.setMemRefs(MMOBegin, MMOEnd); + MIB.cloneMemRefs(MI); // Reload TOC if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) { setUsesTOCBasePtr(*MBB->getParent()); MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2) - .addImm(TOCOffset) - .addReg(BufReg); - - MIB.setMemRefs(MMOBegin, MMOEnd); + .addImm(TOCOffset) + .addReg(BufReg) + .cloneMemRefs(MI); } // Jump Index: 
llvm/trunk/lib/Target/SystemZ/SystemZElimCompare.cpp =================================================================== --- llvm/trunk/lib/Target/SystemZ/SystemZElimCompare.cpp +++ llvm/trunk/lib/Target/SystemZ/SystemZElimCompare.cpp @@ -294,11 +294,10 @@ return false; // Rebuild to get the CC operand in the right place. - MachineInstr *BuiltMI = - BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(Opcode)); + auto MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(Opcode)); for (const auto &MO : MI.operands()) - BuiltMI->addOperand(MO); - BuiltMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + MIB.add(MO); + MIB.setMemRefs(MI.memoperands()); MI.eraseFromParent(); return true; Index: llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp =================================================================== --- llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp +++ llvm/trunk/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -6852,7 +6852,7 @@ .addImm(ThisLength) .add(SrcBase) .addImm(SrcDisp) - ->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); + .setMemRefs(MI.memoperands()); DestDisp += ThisLength; SrcDisp += ThisLength; Length -= ThisLength; Index: llvm/trunk/lib/Target/X86/X86FixupBWInsts.cpp =================================================================== --- llvm/trunk/lib/Target/X86/X86FixupBWInsts.cpp +++ llvm/trunk/lib/Target/X86/X86FixupBWInsts.cpp @@ -288,7 +288,7 @@ for (unsigned i = 1; i < NumArgs; ++i) MIB.add(MI->getOperand(i)); - MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + MIB.setMemRefs(MI->memoperands()); return MIB; } Index: llvm/trunk/lib/Target/X86/X86FlagsCopyLowering.cpp =================================================================== --- llvm/trunk/lib/Target/X86/X86FlagsCopyLowering.cpp +++ llvm/trunk/lib/Target/X86/X86FlagsCopyLowering.cpp @@ -1048,7 +1048,7 @@ MIB.addReg(CondReg); - MIB->setMemRefs(SetCCI.memoperands_begin(), SetCCI.memoperands_end()); + 
MIB.setMemRefs(SetCCI.memoperands()); SetCCI.eraseFromParent(); return; Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp =================================================================== --- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp +++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp @@ -26795,8 +26795,8 @@ // Memory Reference assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand"); - MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); - MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); + SmallVector MMOs(MI.memoperands_begin(), + MI.memoperands_end()); // Machine Information const TargetInstrInfo *TII = Subtarget.getInstrInfo(); @@ -26894,7 +26894,7 @@ .add(Index) .addDisp(Disp, UseFPOffset ? 4 : 0) .add(Segment) - .setMemRefs(MMOBegin, MMOEnd); + .setMemRefs(MMOs); // Check if there is enough room left to pull this argument. BuildMI(thisMBB, DL, TII->get(X86::CMP32ri)) @@ -26919,7 +26919,7 @@ .add(Index) .addDisp(Disp, 16) .add(Segment) - .setMemRefs(MMOBegin, MMOEnd); + .setMemRefs(MMOs); // Zero-extend the offset unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass); @@ -26947,7 +26947,7 @@ .addDisp(Disp, UseFPOffset ? 4 : 0) .add(Segment) .addReg(NextOffsetReg) - .setMemRefs(MMOBegin, MMOEnd); + .setMemRefs(MMOs); // Jump to endMBB BuildMI(offsetMBB, DL, TII->get(X86::JMP_1)) @@ -26966,7 +26966,7 @@ .add(Index) .addDisp(Disp, 8) .add(Segment) - .setMemRefs(MMOBegin, MMOEnd); + .setMemRefs(MMOs); // If we need to align it, do so. Otherwise, just copy the address // to OverflowDestReg. @@ -27003,7 +27003,7 @@ .addDisp(Disp, 8) .add(Segment) .addReg(NextAddrReg) - .setMemRefs(MMOBegin, MMOEnd); + .setMemRefs(MMOs); // If we branched, emit the PHI to the front of endMBB. if (offsetMBB) { @@ -27977,8 +27977,8 @@ MachineInstrBuilder MIB; // Memory Reference. 
- MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); - MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); + SmallVector MMOs(MI.memoperands_begin(), + MI.memoperands_end()); // Initialize a register with zero. MVT PVT = getPointerTy(MF->getDataLayout()); @@ -28007,7 +28007,7 @@ MIB.add(MI.getOperand(MemOpndSlot + i)); } MIB.addReg(SSPCopyReg); - MIB.setMemRefs(MMOBegin, MMOEnd); + MIB.setMemRefs(MMOs); } MachineBasicBlock * @@ -28023,8 +28023,8 @@ MachineFunction::iterator I = ++MBB->getIterator(); // Memory Reference - MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); - MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); + SmallVector MMOs(MI.memoperands_begin(), + MI.memoperands_end()); unsigned DstReg; unsigned MemOpndSlot = 0; @@ -28118,7 +28118,7 @@ MIB.addReg(LabelReg); else MIB.addMBB(restoreMBB); - MIB.setMemRefs(MMOBegin, MMOEnd); + MIB.setMemRefs(MMOs); if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) { emitSetJmpShadowStackFix(MI, thisMBB); @@ -28179,8 +28179,8 @@ MachineRegisterInfo &MRI = MF->getRegInfo(); // Memory Reference - MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); - MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); + SmallVector MMOs(MI.memoperands_begin(), + MI.memoperands_end()); MVT PVT = getPointerTy(MF->getDataLayout()); const TargetRegisterClass *PtrRC = getRegClassFor(PVT); @@ -28267,7 +28267,7 @@ else MIB.add(MI.getOperand(i)); } - MIB.setMemRefs(MMOBegin, MMOEnd); + MIB.setMemRefs(MMOs); // Subtract the current SSP from the previous SSP. 
unsigned SspSubReg = MRI.createVirtualRegister(PtrRC); @@ -28351,8 +28351,8 @@ MachineRegisterInfo &MRI = MF->getRegInfo(); // Memory Reference - MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin(); - MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end(); + SmallVector MMOs(MI.memoperands_begin(), + MI.memoperands_end()); MVT PVT = getPointerTy(MF->getDataLayout()); assert((PVT == MVT::i64 || PVT == MVT::i32) && @@ -28385,7 +28385,7 @@ MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP); for (unsigned i = 0; i < X86::AddrNumOperands; ++i) MIB.add(MI.getOperand(i)); - MIB.setMemRefs(MMOBegin, MMOEnd); + MIB.setMemRefs(MMOs); // Reload IP MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp); @@ -28395,7 +28395,7 @@ else MIB.add(MI.getOperand(i)); } - MIB.setMemRefs(MMOBegin, MMOEnd); + MIB.setMemRefs(MMOs); // Reload SP MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP); @@ -28405,7 +28405,7 @@ else MIB.add(MI.getOperand(i)); } - MIB.setMemRefs(MMOBegin, MMOEnd); + MIB.setMemRefs(MMOs); // Jump BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp); Index: llvm/trunk/lib/Target/X86/X86InstrInfo.h =================================================================== --- llvm/trunk/lib/Target/X86/X86InstrInfo.h +++ llvm/trunk/lib/Target/X86/X86InstrInfo.h @@ -359,8 +359,7 @@ void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill, SmallVectorImpl &Addr, const TargetRegisterClass *RC, - MachineInstr::mmo_iterator MMOBegin, - MachineInstr::mmo_iterator MMOEnd, + ArrayRef MMOs, SmallVectorImpl &NewMIs) const; void loadRegFromStackSlot(MachineBasicBlock &MBB, @@ -371,8 +370,7 @@ void loadRegFromAddr(MachineFunction &MF, unsigned DestReg, SmallVectorImpl &Addr, const TargetRegisterClass *RC, - MachineInstr::mmo_iterator MMOBegin, - MachineInstr::mmo_iterator MMOEnd, + ArrayRef MMOs, SmallVectorImpl &NewMIs) const; bool expandPostRAPseudo(MachineInstr &MI) const override; Index: llvm/trunk/lib/Target/X86/X86InstrInfo.cpp 
=================================================================== --- llvm/trunk/lib/Target/X86/X86InstrInfo.cpp +++ llvm/trunk/lib/Target/X86/X86InstrInfo.cpp @@ -3308,24 +3308,21 @@ .addReg(SrcReg, getKillRegState(isKill)); } -void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg, - bool isKill, - SmallVectorImpl &Addr, - const TargetRegisterClass *RC, - MachineInstr::mmo_iterator MMOBegin, - MachineInstr::mmo_iterator MMOEnd, - SmallVectorImpl &NewMIs) const { +void X86InstrInfo::storeRegToAddr( + MachineFunction &MF, unsigned SrcReg, bool isKill, + SmallVectorImpl &Addr, const TargetRegisterClass *RC, + ArrayRef MMOs, + SmallVectorImpl &NewMIs) const { const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); unsigned Alignment = std::max(TRI.getSpillSize(*RC), 16); - bool isAligned = MMOBegin != MMOEnd && - (*MMOBegin)->getAlignment() >= Alignment; + bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment; unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget); DebugLoc DL; MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc)); for (unsigned i = 0, e = Addr.size(); i != e; ++i) MIB.add(Addr[i]); MIB.addReg(SrcReg, getKillRegState(isKill)); - (*MIB).setMemRefs(MMOBegin, MMOEnd); + MIB.setMemRefs(MMOs); NewMIs.push_back(MIB); } @@ -3345,22 +3342,20 @@ addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx); } -void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg, - SmallVectorImpl &Addr, - const TargetRegisterClass *RC, - MachineInstr::mmo_iterator MMOBegin, - MachineInstr::mmo_iterator MMOEnd, - SmallVectorImpl &NewMIs) const { +void X86InstrInfo::loadRegFromAddr( + MachineFunction &MF, unsigned DestReg, + SmallVectorImpl &Addr, const TargetRegisterClass *RC, + ArrayRef MMOs, + SmallVectorImpl &NewMIs) const { const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo(); unsigned Alignment = std::max(TRI.getSpillSize(*RC), 16); - bool isAligned = MMOBegin != 
MMOEnd && - (*MMOBegin)->getAlignment() >= Alignment; + bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment; unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget); DebugLoc DL; MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg); for (unsigned i = 0, e = Addr.size(); i != e; ++i) MIB.add(Addr[i]); - (*MIB).setMemRefs(MMOBegin, MMOEnd); + MIB.setMemRefs(MMOs); NewMIs.push_back(MIB); } @@ -5450,9 +5445,8 @@ // Emit the load instruction. if (UnfoldLoad) { - std::pair MMOs = - MF.extractLoadMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs); + auto MMOs = extractLoadMMOs(MI.memoperands(), MF); + loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs, NewMIs); if (UnfoldStore) { // Address operands cannot be marked isKill. for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) { @@ -5517,9 +5511,8 @@ // Emit the store instruction. if (UnfoldStore) { const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF); - std::pair MMOs = - MF.extractStoreMemRefs(MI.memoperands_begin(), MI.memoperands_end()); - storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs.first, MMOs.second, NewMIs); + auto MMOs = extractStoreMMOs(MI.memoperands(), MF); + storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs, NewMIs); } return true;