diff --git a/llvm/include/llvm/Analysis/MemoryLocation.h b/llvm/include/llvm/Analysis/MemoryLocation.h
--- a/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -27,13 +27,15 @@
 class LoadInst;
 class StoreInst;
 class MemTransferInst;
-class MemIntrinsic;
+class MemWriteIntrinsic;
 class AtomicCmpXchgInst;
 class AtomicMemTransferInst;
 class AtomicMemIntrinsic;
+class AtomicMemWriteIntrinsic;
 class AtomicRMWInst;
 class AnyMemTransferInst;
 class AnyMemIntrinsic;
+class AnyMemWriteIntrinsic;
 class TargetLibraryInfo;
 class VAArgInst;
 
@@ -250,9 +252,9 @@
 
   /// Return a location representing the destination of a memory set or
   /// transfer.
-  static MemoryLocation getForDest(const MemIntrinsic *MI);
-  static MemoryLocation getForDest(const AtomicMemIntrinsic *MI);
-  static MemoryLocation getForDest(const AnyMemIntrinsic *MI);
+  static MemoryLocation getForDest(const MemWriteIntrinsic *MI);
+  static MemoryLocation getForDest(const AtomicMemWriteIntrinsic *MI);
+  static MemoryLocation getForDest(const AnyMemWriteIntrinsic *MI);
 
   /// Return a location representing a particular argument of a call.
   static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
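
A minimal sketch (editor's illustration, not part of the patch) of how a caller adapts to the retyped getForDest overloads; the enclosing pass and the instruction `I` are assumptions:

```cpp
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Only write intrinsics expose a destination now, so callers cast to
// MemWriteIntrinsic (or AnyMemWriteIntrinsic) rather than MemIntrinsic.
static void queryDest(Instruction &I) {
  if (auto *MWI = dyn_cast<MemWriteIntrinsic>(&I)) {
    MemoryLocation DestLoc = MemoryLocation::getForDest(MWI);
    (void)DestLoc; // e.g. feed into alias-analysis queries
  }
}
```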
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -631,68 +631,72 @@
 /// three atomicity hierarchies.
 template <typename Derived> class MemIntrinsicBase : public IntrinsicInst {
 private:
-  enum { ARG_DEST = 0, ARG_LENGTH = 2 };
+  enum { ARG_LENGTH = 2 };
 
-public:
-  Value *getRawDest() const {
-    return const_cast<Value *>(getArgOperand(ARG_DEST));
+protected:
+  Value *getRawArg(unsigned ArgIndex) const {
+    return const_cast<Value *>(getArgOperand(ArgIndex));
   }
-  const Use &getRawDestUse() const { return getArgOperandUse(ARG_DEST); }
-  Use &getRawDestUse() { return getArgOperandUse(ARG_DEST); }
 
-  Value *getLength() const {
-    return const_cast<Value *>(getArgOperand(ARG_LENGTH));
+  const Use &getRawArgUse(unsigned ArgIndex) const {
+    return getArgOperandUse(ArgIndex);
   }
-  const Use &getLengthUse() const { return getArgOperandUse(ARG_LENGTH); }
-  Use &getLengthUse() { return getArgOperandUse(ARG_LENGTH); }
 
-  /// This is just like getRawDest, but it strips off any cast
-  /// instructions (including addrspacecast) that feed it, giving the
-  /// original input.  The returned value is guaranteed to be a pointer.
-  Value *getDest() const { return getRawDest()->stripPointerCasts(); }
+  Use &getRawArgUse(unsigned ArgIndex) { return getArgOperandUse(ArgIndex); }
 
-  unsigned getDestAddressSpace() const {
-    return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
+  Value *getArg(unsigned ArgIndex) const {
+    return getRawArg(ArgIndex)->stripPointerCasts();
+  }
+
+  unsigned getArgAddressSpace(unsigned ArgIndex) const {
+    return cast<PointerType>(getRawArg(ArgIndex)->getType())->getAddressSpace();
   }
 
   /// FIXME: Remove this function once transition to Align is over.
-  /// Use getDestAlign() instead.
-  unsigned getDestAlignment() const {
-    if (auto MA = getParamAlign(ARG_DEST))
+  /// Use getArgAlign() instead.
+  unsigned getArgAlignment(unsigned ArgIndex) const {
+    if (auto MA = getParamAlign(ArgIndex))
       return MA->value();
     return 0;
   }
-  MaybeAlign getDestAlign() const { return getParamAlign(ARG_DEST); }
 
-  /// Set the specified arguments of the instruction.
-  void setDest(Value *Ptr) {
-    assert(getRawDest()->getType() == Ptr->getType() &&
-           "setDest called with pointer of wrong type!");
-    setArgOperand(ARG_DEST, Ptr);
+  MaybeAlign getArgAlign(unsigned ArgIndex) const {
+    return getParamAlign(ArgIndex);
+  }
+
+  void setArg(unsigned ArgIndex, Value *V) {
+    assert(getRawArg(ArgIndex)->getType() == V->getType() &&
+           "setArg called with operand of wrong type!");
+    setArgOperand(ArgIndex, V);
   }
 
   /// FIXME: Remove this function once transition to Align is over.
   /// Use the version that takes MaybeAlign instead of this one.
-  void setDestAlignment(unsigned Alignment) {
-    setDestAlignment(MaybeAlign(Alignment));
+  void setArgAlignment(unsigned ArgIndex, unsigned Alignment) {
+    setArgAlignment(ArgIndex, MaybeAlign(Alignment));
   }
-  void setDestAlignment(MaybeAlign Alignment) {
-    removeParamAttr(ARG_DEST, Attribute::Alignment);
+
+  void setArgAlignment(unsigned ArgIndex, MaybeAlign Alignment) {
+    removeParamAttr(ArgIndex, Attribute::Alignment);
     if (Alignment)
-      addParamAttr(ARG_DEST,
+      addParamAttr(ArgIndex,
                    Attribute::getWithAlignment(getContext(), *Alignment));
   }
-  void setDestAlignment(Align Alignment) {
-    removeParamAttr(ARG_DEST, Attribute::Alignment);
-    addParamAttr(ARG_DEST,
+
+  void setArgAlignment(unsigned ArgIndex, Align Alignment) {
+    removeParamAttr(ArgIndex, Attribute::Alignment);
+    addParamAttr(ArgIndex,
                  Attribute::getWithAlignment(getContext(), Alignment));
   }
 
-  void setLength(Value *L) {
-    assert(getLength()->getType() == L->getType() &&
-           "setLength called with value of wrong type!");
-    setArgOperand(ARG_LENGTH, L);
-  }
+public:
+  Value *getLength() const { return getRawArg(ARG_LENGTH); }
+
+  const Use &getLengthUse() const { return getRawArgUse(ARG_LENGTH); }
+
+  Use &getLengthUse() { return getRawArgUse(ARG_LENGTH); }
+
+  void setLength(Value *L) { setArg(ARG_LENGTH, L); }
 };
 
 /// Common base class for all memory transfer intrinsics. Simply provides
@@ -703,56 +707,45 @@
 
 public:
   /// Return the arguments to the instruction.
-  Value *getRawSource() const {
-    return const_cast<Value *>(BaseCL::getArgOperand(ARG_SOURCE));
-  }
+  Value *getRawSource() const { return BaseCL::getRawArg(ARG_SOURCE); }
+
   const Use &getRawSourceUse() const {
-    return BaseCL::getArgOperandUse(ARG_SOURCE);
+    return BaseCL::getRawArgUse(ARG_SOURCE);
   }
-  Use &getRawSourceUse() { return BaseCL::getArgOperandUse(ARG_SOURCE); }
+
+  Use &getRawSourceUse() { return BaseCL::getRawArgUse(ARG_SOURCE); }
 
   /// This is just like getRawSource, but it strips off any cast
   /// instructions that feed it, giving the original input.  The returned
   /// value is guaranteed to be a pointer.
-  Value *getSource() const { return getRawSource()->stripPointerCasts(); }
+  Value *getSource() const { return BaseCL::getArg(ARG_SOURCE); }
 
   unsigned getSourceAddressSpace() const {
-    return cast<PointerType>(getRawSource()->getType())->getAddressSpace();
+    return BaseCL::getArgAddressSpace(ARG_SOURCE);
   }
 
   /// FIXME: Remove this function once transition to Align is over.
   /// Use getSourceAlign() instead.
   unsigned getSourceAlignment() const {
-    if (auto MA = BaseCL::getParamAlign(ARG_SOURCE))
-      return MA->value();
-    return 0;
+    return BaseCL::getArgAlignment(ARG_SOURCE);
   }
 
-  MaybeAlign getSourceAlign() const {
-    return BaseCL::getParamAlign(ARG_SOURCE);
-  }
+  MaybeAlign getSourceAlign() const { return BaseCL::getArgAlign(ARG_SOURCE); }
 
-  void setSource(Value *Ptr) {
-    assert(getRawSource()->getType() == Ptr->getType() &&
-           "setSource called with pointer of wrong type!");
-    BaseCL::setArgOperand(ARG_SOURCE, Ptr);
-  }
+  void setSource(Value *Ptr) { BaseCL::setArg(ARG_SOURCE, Ptr); }
 
   /// FIXME: Remove this function once transition to Align is over.
   /// Use the version that takes MaybeAlign instead of this one.
   void setSourceAlignment(unsigned Alignment) {
-    setSourceAlignment(MaybeAlign(Alignment));
+    BaseCL::setArgAlignment(ARG_SOURCE, Alignment);
   }
+
   void setSourceAlignment(MaybeAlign Alignment) {
-    BaseCL::removeParamAttr(ARG_SOURCE, Attribute::Alignment);
-    if (Alignment)
-      BaseCL::addParamAttr(ARG_SOURCE, Attribute::getWithAlignment(
-                                           BaseCL::getContext(), *Alignment));
+    BaseCL::setArgAlignment(ARG_SOURCE, Alignment);
   }
+
   void setSourceAlignment(Align Alignment) {
-    BaseCL::removeParamAttr(ARG_SOURCE, Attribute::Alignment);
-    BaseCL::addParamAttr(ARG_SOURCE, Attribute::getWithAlignment(
-                                         BaseCL::getContext(), Alignment));
+    BaseCL::setArgAlignment(ARG_SOURCE, Alignment);
   }
 };
 
@@ -763,17 +756,13 @@
   enum { ARG_VALUE = 1 };
 
 public:
-  Value *getValue() const {
-    return const_cast<Value *>(BaseCL::getArgOperand(ARG_VALUE));
-  }
-  const Use &getValueUse() const { return BaseCL::getArgOperandUse(ARG_VALUE); }
-  Use &getValueUse() { return BaseCL::getArgOperandUse(ARG_VALUE); }
+  Value *getValue() const { return BaseCL::getRawArg(ARG_VALUE); }
 
-  void setValue(Value *Val) {
-    assert(getValue()->getType() == Val->getType() &&
-           "setValue called with value of wrong type!");
-    BaseCL::setArgOperand(ARG_VALUE, Val);
-  }
+  const Use &getValueUse() const { return BaseCL::getRawArgUse(ARG_VALUE); }
+
+  Use &getValueUse() { return BaseCL::getRawArgUse(ARG_VALUE); }
+
+  void setValue(Value *Val) { BaseCL::setArg(ARG_VALUE, Val); }
 };
 
 // The common base class for the atomic memset/memmove/memcpy intrinsics
@@ -783,9 +772,7 @@
   enum { ARG_ELEMENTSIZE = 3 };
 
 public:
-  Value *getRawElementSizeInBytes() const {
-    return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
-  }
+  Value *getRawElementSizeInBytes() const { return getRawArg(ARG_ELEMENTSIZE); }
 
   ConstantInt *getElementSizeInBytesCst() const {
     return cast<ConstantInt>(getRawElementSizeInBytes());
@@ -795,11 +782,7 @@
     return getElementSizeInBytesCst()->getZExtValue();
   }
 
-  void setElementSizeInBytes(Constant *V) {
-    assert(V->getType() == Type::getInt8Ty(getContext()) &&
-           "setElementSizeInBytes called with value of wrong type!");
-    setArgOperand(ARG_ELEMENTSIZE, V);
-  }
+  void setElementSizeInBytes(Constant *V) { setArg(ARG_ELEMENTSIZE, V); }
 
   static bool classof(const IntrinsicInst *I) {
     switch (I->getIntrinsicID()) {
@@ -816,9 +799,74 @@
   }
 };
 
+// Common base class for all memory write intrinsics (memcpy/memset/memmove).
+template <class BaseCL> class MemWriteIntrinsicBase : public BaseCL {
+private:
+  enum { ARG_DEST = 0 };
+
+public:
+  /// This is just like getRawDest, but it strips off any cast
+  /// instructions (including addrspacecast) that feed it, giving the
+  /// original input.  The returned value is guaranteed to be a pointer.
+  Value *getDest() const { return BaseCL::getArg(ARG_DEST); }
+
+  unsigned getDestAddressSpace() const {
+    return BaseCL::getArgAddressSpace(ARG_DEST);
+  }
+
+  /// FIXME: Remove this function once transition to Align is over.
+  /// Use getDestAlign() instead.
+  unsigned getDestAlignment() const {
+    return BaseCL::getArgAlignment(ARG_DEST);
+  }
+
+  MaybeAlign getDestAlign() const { return BaseCL::getArgAlign(ARG_DEST); }
+
+  /// Set the specified arguments of the instruction.
+  void setDest(Value *Ptr) { BaseCL::setArg(ARG_DEST, Ptr); }
+
+  /// FIXME: Remove this function once transition to Align is over.
+  /// Use the version that takes MaybeAlign instead of this one.
+  void setDestAlignment(unsigned Alignment) {
+    BaseCL::setArgAlignment(ARG_DEST, Alignment);
+  }
+
+  void setDestAlignment(MaybeAlign Alignment) {
+    BaseCL::setArgAlignment(ARG_DEST, Alignment);
+  }
+
+  void setDestAlignment(Align Alignment) {
+    BaseCL::setArgAlignment(ARG_DEST, Alignment);
+  }
+
+  Value *getRawDest() const { return BaseCL::getRawArg(ARG_DEST); }
+
+  const Use &getRawDestUse() const { return BaseCL::getRawArgUse(ARG_DEST); }
+
+  Use &getRawDestUse() { return BaseCL::getRawArgUse(ARG_DEST); }
+};
+
+class AtomicMemWriteIntrinsic
+    : public MemWriteIntrinsicBase<AtomicMemIntrinsic> {
+public:
+  static bool classof(const IntrinsicInst *I) {
+    switch (I->getIntrinsicID()) {
+    case Intrinsic::memcpy_element_unordered_atomic:
+    case Intrinsic::memset_element_unordered_atomic:
+    case Intrinsic::memmove_element_unordered_atomic:
+      return true;
+    default:
+      return false;
+    }
+  }
+  static bool classof(const Value *V) {
+    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+  }
+};
+
 /// This class represents atomic memset intrinsic
 // i.e. llvm.element.unordered.atomic.memset
-class AtomicMemSetInst : public MemSetBase<AtomicMemIntrinsic> {
+class AtomicMemSetInst : public MemSetBase<AtomicMemWriteIntrinsic> {
 public:
   static bool classof(const IntrinsicInst *I) {
     return I->getIntrinsicID() == Intrinsic::memset_element_unordered_atomic;
@@ -830,7 +878,7 @@
 
 // This class wraps the atomic memcpy/memmove intrinsics
 // i.e. llvm.element.unordered.atomic.memcpy/memmove
-class AtomicMemTransferInst : public MemTransferBase<AtomicMemIntrinsic> {
+class AtomicMemTransferInst : public MemTransferBase<AtomicMemWriteIntrinsic> {
 public:
   static bool classof(const IntrinsicInst *I) {
     switch (I->getIntrinsicID()) {
@@ -901,8 +949,27 @@
   }
 };
 
+class MemWriteIntrinsic : public MemWriteIntrinsicBase<MemIntrinsic> {
+public:
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const IntrinsicInst *I) {
+    switch (I->getIntrinsicID()) {
+    case Intrinsic::memcpy:
+    case Intrinsic::memmove:
+    case Intrinsic::memset:
+    case Intrinsic::memcpy_inline:
+      return true;
+    default:
+      return false;
+    }
+  }
+  static bool classof(const Value *V) {
+    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+  }
+};
+
 /// This class wraps the llvm.memset intrinsic.
-class MemSetInst : public MemSetBase<MemIntrinsic> {
+class MemSetInst : public MemSetBase<MemWriteIntrinsic> {
 public:
   // Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const IntrinsicInst *I) {
@@ -914,7 +981,7 @@
 };
 
 /// This class wraps the llvm.memcpy/memmove intrinsics.
-class MemTransferInst : public MemTransferBase<MemIntrinsic> {
+class MemTransferInst : public MemTransferBase<MemWriteIntrinsic> {
 public:
   // Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const IntrinsicInst *I) {
@@ -1004,10 +1071,31 @@
   }
 };
 
+class AnyMemWriteIntrinsic : public MemWriteIntrinsicBase<AnyMemIntrinsic> {
+public:
+  static bool classof(const IntrinsicInst *I) {
+    switch (I->getIntrinsicID()) {
+    case Intrinsic::memcpy:
+    case Intrinsic::memcpy_inline:
+    case Intrinsic::memmove:
+    case Intrinsic::memset:
+    case Intrinsic::memcpy_element_unordered_atomic:
+    case Intrinsic::memmove_element_unordered_atomic:
+    case Intrinsic::memset_element_unordered_atomic:
+      return true;
+    default:
+      return false;
+    }
+  }
+  static bool classof(const Value *V) {
+    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+  }
+};
+
 /// This class represents any memset intrinsic
 // i.e. llvm.element.unordered.atomic.memset
 // and  llvm.memset
-class AnyMemSetInst : public MemSetBase<AnyMemIntrinsic> {
+class AnyMemSetInst : public MemSetBase<AnyMemWriteIntrinsic> {
 public:
   static bool classof(const IntrinsicInst *I) {
     switch (I->getIntrinsicID()) {
@@ -1026,7 +1114,7 @@
 // This class wraps any memcpy/memmove intrinsics
 // i.e. llvm.element.unordered.atomic.memcpy/memmove
 // and  llvm.memcpy/memmove
-class AnyMemTransferInst : public MemTransferBase<AnyMemIntrinsic> {
+class AnyMemTransferInst : public MemTransferBase<AnyMemWriteIntrinsic> {
 public:
   static bool classof(const IntrinsicInst *I) {
     switch (I->getIntrinsicID()) {
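
To summarize the hierarchy change, here is a hedged sketch of how a pass reads the destination through the new classes; nothing below is in the patch itself:

```cpp
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// getDest()/getDestAlign() moved from MemIntrinsicBase into
// MemWriteIntrinsicBase, so they are reached via the *WriteIntrinsic
// classes; getLength() remains on MemIntrinsicBase and is unchanged.
static void inspect(IntrinsicInst *II) {
  if (auto *MWI = dyn_cast<AnyMemWriteIntrinsic>(II)) {
    Value *Dest = MWI->getDest();      // dest with pointer casts stripped
    MaybeAlign DestAlign = MWI->getDestAlign();
    Value *Len = MWI->getLength();
    (void)Dest; (void)DestAlign; (void)Len;
  }
}
```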
diff --git a/llvm/include/llvm/Transforms/Utils/VNCoercion.h b/llvm/include/llvm/Transforms/Utils/VNCoercion.h
--- a/llvm/include/llvm/Transforms/Utils/VNCoercion.h
+++ b/llvm/include/llvm/Transforms/Utils/VNCoercion.h
@@ -25,7 +25,7 @@
 class Constant;
 class StoreInst;
 class LoadInst;
-class MemIntrinsic;
+class MemWriteIntrinsic;
 class Instruction;
 class IRBuilderBase;
 class Value;
@@ -68,7 +68,8 @@
 /// On success, it returns the offset into DepMI that extraction would start.
 /// On failure, it returns -1.
 int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
-                                     MemIntrinsic *DepMI, const DataLayout &DL);
+                                     MemWriteIntrinsic *DepMI,
+                                     const DataLayout &DL);
 
 /// If analyzeLoadFromClobberingStore returned an offset, this function can be
 /// used to actually perform the extraction of the bits from the store. It
@@ -95,13 +96,14 @@
 /// used to actually perform the extraction of the bits from the memory
 /// intrinsic.  It inserts instructions to do so at InsertPt, and returns the
 /// extracted value.
-Value *getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
+Value *getMemInstValueForLoad(MemWriteIntrinsic *SrcInst, unsigned Offset,
                               Type *LoadTy, Instruction *InsertPt,
                               const DataLayout &DL);
 // This is the same as getStoreValueForLoad, except it performs no insertion.
 // It returns nullptr if it cannot produce a constant.
-Constant *getConstantMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
-                                         Type *LoadTy, const DataLayout &DL);
+Constant *getConstantMemInstValueForLoad(MemWriteIntrinsic *SrcInst,
+                                         unsigned Offset, Type *LoadTy,
+                                         const DataLayout &DL);
 }
 }
 #endif
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -634,21 +634,23 @@
     PtrSet.insert(getUnderlyingObject(Ptr));
 }
 
-static void AddNonNullPointersByInstruction(
-    Instruction *I, NonNullPointerSet &PtrSet) {
+static void AddNonNullPointersByInstruction(Instruction *I,
+                                            NonNullPointerSet &PtrSet) {
   if (LoadInst *L = dyn_cast<LoadInst>(I)) {
     AddNonNullPointer(L->getPointerOperand(), PtrSet);
   } else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
     AddNonNullPointer(S->getPointerOperand(), PtrSet);
-  } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
-    if (MI->isVolatile()) return;
+  } else if (MemWriteIntrinsic *MWI = dyn_cast<MemWriteIntrinsic>(I)) {
+    if (MWI->isVolatile())
+      return;
 
     // FIXME: check whether it has a value range that excludes zero?
-    ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
-    if (!Len || Len->isZero()) return;
+    ConstantInt *Len = dyn_cast<ConstantInt>(MWI->getLength());
+    if (!Len || Len->isZero())
+      return;
 
-    AddNonNullPointer(MI->getRawDest(), PtrSet);
-    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
+    AddNonNullPointer(MWI->getRawDest(), PtrSet);
+    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MWI))
       AddNonNullPointer(MTI->getRawSource(), PtrSet);
   }
 }
diff --git a/llvm/lib/Analysis/MemoryLocation.cpp b/llvm/lib/Analysis/MemoryLocation.cpp
--- a/llvm/lib/Analysis/MemoryLocation.cpp
+++ b/llvm/lib/Analysis/MemoryLocation.cpp
@@ -123,25 +123,25 @@
   return MemoryLocation(MTI->getRawSource(), Size, AATags);
 }
 
-MemoryLocation MemoryLocation::getForDest(const MemIntrinsic *MI) {
-  return getForDest(cast<AnyMemIntrinsic>(MI));
+MemoryLocation MemoryLocation::getForDest(const MemWriteIntrinsic *MI) {
+  return getForDest(cast<AnyMemWriteIntrinsic>(MI));
 }
 
-MemoryLocation MemoryLocation::getForDest(const AtomicMemIntrinsic *MI) {
-  return getForDest(cast<AnyMemIntrinsic>(MI));
+MemoryLocation MemoryLocation::getForDest(const AtomicMemWriteIntrinsic *AMI) {
+  return getForDest(cast<AnyMemWriteIntrinsic>(AMI));
 }
 
-MemoryLocation MemoryLocation::getForDest(const AnyMemIntrinsic *MI) {
+MemoryLocation MemoryLocation::getForDest(const AnyMemWriteIntrinsic *MWI) {
   auto Size = LocationSize::afterPointer();
-  if (ConstantInt *C = dyn_cast<ConstantInt>(MI->getLength()))
+  if (ConstantInt *C = dyn_cast<ConstantInt>(MWI->getLength()))
     Size = LocationSize::precise(C->getValue().getZExtValue());
 
   // memcpy/memmove can have AA tags. For memcpy, they apply
   // to both the source and the destination.
   AAMDNodes AATags;
-  MI->getAAMetadata(AATags);
+  MWI->getAAMetadata(AATags);
 
-  return MemoryLocation(MI->getRawDest(), Size, AATags);
+  return MemoryLocation(MWI->getRawDest(), Size, AATags);
 }
 
 MemoryLocation MemoryLocation::getForArgument(const CallBase *Call,
diff --git a/llvm/lib/Analysis/StackSafetyAnalysis.cpp b/llvm/lib/Analysis/StackSafetyAnalysis.cpp
--- a/llvm/lib/Analysis/StackSafetyAnalysis.cpp
+++ b/llvm/lib/Analysis/StackSafetyAnalysis.cpp
@@ -306,8 +306,8 @@
   if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
     if (MTI->getRawSource() != U && MTI->getRawDest() != U)
       return ConstantRange::getEmpty(PointerSize);
-  } else {
-    if (MI->getRawDest() != U)
+  } else if (const auto *MWI = dyn_cast<MemWriteIntrinsic>(MI)) {
+    if (MWI->getRawDest() != U)
       return ConstantRange::getEmpty(PointerSize);
   }
 
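
The unconditional else above becomes an else-if: with the dest accessors now confined to the write hierarchy, anything that is not a MemWriteIntrinsic falls through and stays conservative. A hypothetical helper distilling the pattern (the same shape recurs in SafeStack below):

```cpp
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Hypothetical helper: does the intrinsic access memory through U via a
// known pointer operand? Unrecognized cases answer "yes" conservatively.
static bool touchesUse(const MemIntrinsic *MI, const Use &U) {
  if (const auto *MTI = dyn_cast<MemTransferInst>(MI))
    return MTI->getRawSource() == U || MTI->getRawDest() == U;
  if (const auto *MWI = dyn_cast<MemWriteIntrinsic>(MI))
    return MWI->getRawDest() == U;
  return true; // not a recognized write intrinsic: assume it may touch U
}
```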
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2139,12 +2139,12 @@
     }
     // If this is a memcpy (or similar) then we may be able to improve the
     // alignment
-    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
-      Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
-      MaybeAlign MIDestAlign = MI->getDestAlign();
-      if (!MIDestAlign || DestAlign > *MIDestAlign)
-        MI->setDestAlignment(DestAlign);
-      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
+    if (MemWriteIntrinsic *MWI = dyn_cast<MemWriteIntrinsic>(CI)) {
+      Align DestAlign = getKnownAlignment(MWI->getDest(), *DL);
+      MaybeAlign MWIDestAlign = MWI->getDestAlign();
+      if (!MWIDestAlign || DestAlign > *MWIDestAlign)
+        MWI->setDestAlignment(DestAlign);
+      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MWI)) {
         MaybeAlign MTISrcAlign = MTI->getSourceAlign();
         Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
         if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
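
For context, the alignment-raising idiom this hunk retargets, as a standalone sketch (the helper name is mine):

```cpp
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Widen the intrinsic's recorded destination alignment if a larger one
// can be proven for the pointer.
static void raiseDestAlign(MemWriteIntrinsic *MWI, const DataLayout &DL) {
  Align Known = getKnownAlignment(MWI->getDest(), DL);
  MaybeAlign Cur = MWI->getDestAlign();
  if (!Cur || Known > *Cur)
    MWI->setDestAlignment(Known);
}
```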
diff --git a/llvm/lib/CodeGen/SafeStack.cpp b/llvm/lib/CodeGen/SafeStack.cpp
--- a/llvm/lib/CodeGen/SafeStack.cpp
+++ b/llvm/lib/CodeGen/SafeStack.cpp
@@ -264,17 +264,18 @@
 bool SafeStack::IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
                                    const Value *AllocaPtr,
                                    uint64_t AllocaSize) {
-  if (auto MTI = dyn_cast<MemTransferInst>(MI)) {
+  if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
     if (MTI->getRawSource() != U && MTI->getRawDest() != U)
       return true;
-  } else {
-    if (MI->getRawDest() != U)
+  } else if (const auto *MWI = dyn_cast<MemWriteIntrinsic>(MI)) {
+    if (MWI->getRawDest() != U)
       return true;
   }
 
   const auto *Len = dyn_cast<ConstantInt>(MI->getLength());
   // Non-constant size => unsafe. FIXME: try SCEV getRange.
-  if (!Len) return false;
+  if (!Len)
+    return false;
   return IsAccessSafe(U, Len->getZExtValue(), AllocaPtr, AllocaSize);
 }
 
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -3930,12 +3930,12 @@
     Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1),
                       CI->getArgOperand(2), CI->getArgOperand(4)};
     NewCall = Builder.CreateCall(NewFn, Args);
-    auto *MemCI = cast<MemIntrinsic>(NewCall);
+    auto *MemWriteIntr = cast<MemWriteIntrinsic>(NewCall);
     // All mem intrinsics support dest alignment.
     const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
-    MemCI->setDestAlignment(Align->getMaybeAlignValue());
+    MemWriteIntr->setDestAlignment(Align->getMaybeAlignValue());
     // Memcpy/Memmove also support source alignment.
-    if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))
+    if (auto *MTI = dyn_cast<MemTransferInst>(MemWriteIntr))
       MTI->setSourceAlignment(Align->getMaybeAlignValue());
     break;
   }
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -4671,28 +4671,27 @@
   case Intrinsic::memcpy_inline:
   case Intrinsic::memmove:
   case Intrinsic::memset: {
-    const auto *MI = cast<MemIntrinsic>(&Call);
+    const auto *MWI = cast<MemWriteIntrinsic>(&Call);
     auto IsValidAlignment = [&](unsigned Alignment) -> bool {
       return Alignment == 0 || isPowerOf2_32(Alignment);
     };
-    Assert(IsValidAlignment(MI->getDestAlignment()),
+    Assert(IsValidAlignment(MWI->getDestAlignment()),
            "alignment of arg 0 of memory intrinsic must be 0 or a power of 2",
            Call);
-    if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
+    if (const auto *MTI = dyn_cast<MemTransferInst>(MWI)) {
       Assert(IsValidAlignment(MTI->getSourceAlignment()),
              "alignment of arg 1 of memory intrinsic must be 0 or a power of 2",
              Call);
     }
-
     break;
   }
   case Intrinsic::memcpy_element_unordered_atomic:
   case Intrinsic::memmove_element_unordered_atomic:
   case Intrinsic::memset_element_unordered_atomic: {
-    const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
+    const auto *AMWI = cast<AtomicMemWriteIntrinsic>(&Call);
 
     ConstantInt *ElementSizeCI =
-        cast<ConstantInt>(AMI->getRawElementSizeInBytes());
+        cast<ConstantInt>(AMWI->getRawElementSizeInBytes());
     const APInt &ElementSizeVal = ElementSizeCI->getValue();
     Assert(ElementSizeVal.isPowerOf2(),
            "element size of the element-wise atomic memory intrinsic "
@@ -4702,10 +4701,10 @@
     auto IsValidAlignment = [&](uint64_t Alignment) {
       return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
     };
-    uint64_t DstAlignment = AMI->getDestAlignment();
+    uint64_t DstAlignment = AMWI->getDestAlignment();
     Assert(IsValidAlignment(DstAlignment),
            "incorrect alignment of the destination argument", Call);
-    if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
+    if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMWI)) {
       uint64_t SrcAlignment = AMT->getSourceAlignment();
       Assert(IsValidAlignment(SrcAlignment),
              "incorrect alignment of the source argument", Call);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp
@@ -120,20 +120,20 @@
 };
 
 static const Value *getMemoryInstrPtr(const Instruction *Inst) {
-  if (auto LI = dyn_cast<LoadInst>(Inst)) {
+  if (const auto *LI = dyn_cast<LoadInst>(Inst)) {
     return LI->getPointerOperand();
   }
-  if (auto SI = dyn_cast<StoreInst>(Inst)) {
+  if (const auto *SI = dyn_cast<StoreInst>(Inst)) {
     return SI->getPointerOperand();
   }
-  if (auto AI = dyn_cast<AtomicCmpXchgInst>(Inst)) {
+  if (const auto *AI = dyn_cast<AtomicCmpXchgInst>(Inst)) {
     return AI->getPointerOperand();
   }
-  if (auto AI = dyn_cast<AtomicRMWInst>(Inst)) {
+  if (const auto *AI = dyn_cast<AtomicRMWInst>(Inst)) {
     return AI->getPointerOperand();
   }
-  if (auto MI = dyn_cast<AnyMemIntrinsic>(Inst)) {
-    return MI->getRawDest();
+  if (const auto *MWI = dyn_cast<AnyMemWriteIntrinsic>(Inst)) {
+    return MWI->getRawDest();
   }
 
   return nullptr;
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -349,9 +349,10 @@
         GEP->eraseFromParent();
         Changed = true;
       }
-    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
-      if (MI->getRawDest() == V) {
-        MI->eraseFromParent();
+    } else if (MemWriteIntrinsic *MWI = dyn_cast<MemWriteIntrinsic>(U)) {
+      // memset/cpy/mv
+      if (MWI->getRawDest() == V) {
+        MWI->eraseFromParent();
         Changed = true;
       }
 
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -889,11 +889,11 @@
 
   // Intrinsics cannot occur in an invoke or a callbr, so handle them here
   // instead of in visitCallBase.
-  if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
+  if (auto *MWI = dyn_cast<AnyMemWriteIntrinsic>(II)) {
     bool Changed = false;
 
     // memmove/cpy/set of zero bytes is a noop.
-    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
+    if (Constant *NumBytes = dyn_cast<Constant>(MWI->getLength())) {
       if (NumBytes->isNullValue())
         return eraseInstFromFunction(CI);
 
@@ -906,14 +906,14 @@
     }
 
     // No other transformations apply to volatile transfers.
-    if (auto *M = dyn_cast<MemIntrinsic>(MI))
+    if (auto *M = dyn_cast<MemIntrinsic>(MWI))
       if (M->isVolatile())
         return nullptr;
 
     // If we have a memmove and the source operation is a constant global,
     // then the source and dest pointers can't alias, so we can change this
     // into a call to memcpy.
-    if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
+    if (auto *MMI = dyn_cast<AnyMemMoveInst>(MWI)) {
       if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
         if (GVSrc->isConstant()) {
           Module *M = CI.getModule();
@@ -929,7 +929,7 @@
         }
     }
 
-    if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
+    if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MWI)) {
       // memmove(x,x,size) -> noop.
       if (MTI->getSource() == MTI->getDest())
         return eraseInstFromFunction(CI);
@@ -937,10 +937,10 @@
 
     // If we can determine a pointer alignment that is bigger than currently
     // set, update the alignment.
-    if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
+    if (auto *MTI = dyn_cast<AnyMemTransferInst>(MWI)) {
       if (Instruction *I = SimplifyAnyMemTransfer(MTI))
         return I;
-    } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
+    } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MWI)) {
       if (Instruction *I = SimplifyAnyMemSet(MSI))
         return I;
     }
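
One subtlety worth calling out: the volatile guard dyn_casts back down to MemIntrinsic because the element-wise atomic intrinsics carry no volatile flag. A sketch of that check in isolation:

```cpp
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// AnyMemWriteIntrinsic covers the atomic variants too, but only the
// plain MemIntrinsic family has isVolatile(), hence the narrowing cast.
static bool isVolatileWrite(AnyMemWriteIntrinsic *MWI) {
  if (auto *M = dyn_cast<MemIntrinsic>(MWI))
    return M->isVolatile();
  return false; // element-wise atomic intrinsics are never volatile
}
```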
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2613,8 +2613,8 @@
           case Intrinsic::memmove:
           case Intrinsic::memcpy:
           case Intrinsic::memset: {
-            MemIntrinsic *MI = cast<MemIntrinsic>(II);
-            if (MI->isVolatile() || MI->getRawDest() != PI)
+            MemWriteIntrinsic *MWI = cast<MemWriteIntrinsic>(II);
+            if (MWI->isVolatile() || MWI->getRawDest() != PI)
               return false;
             LLVM_FALLTHROUGH;
           }
diff --git a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
--- a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -278,22 +278,22 @@
         SI->setAlignment(NewAlignment);
         ++NumStoreAlignChanged;
       }
-    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) {
+    } else if (MemWriteIntrinsic *MWI = dyn_cast<MemWriteIntrinsic>(J)) {
       if (!isValidAssumeForContext(ACall, J, DT))
         continue;
       Align NewDestAlignment =
-          getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MI->getDest(), SE);
+          getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MWI->getDest(), SE);
 
       LLVM_DEBUG(dbgs() << "\tmem inst: " << DebugStr(NewDestAlignment)
                         << "\n";);
-      if (NewDestAlignment > *MI->getDestAlign()) {
-        MI->setDestAlignment(NewDestAlignment);
+      if (NewDestAlignment > *MWI->getDestAlign()) {
+        MWI->setDestAlignment(NewDestAlignment);
         ++NumMemIntAlignChanged;
       }
 
       // For memory transfers, there is also a source alignment that
       // can be set.
-      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
+      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MWI)) {
         Align NewSrcAlignment =
             getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MTI->getSource(), SE);
 
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -214,8 +214,8 @@
     return MemoryLocation::get(SI);
 
   // memcpy/memmove/memset.
-  if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst))
-    return MemoryLocation::getForDest(MI);
+  if (auto *MWI = dyn_cast<AnyMemWriteIntrinsic>(Inst))
+    return MemoryLocation::getForDest(MWI);
 
   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
     switch (II->getIntrinsicID()) {
@@ -578,7 +578,7 @@
 static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierStart,
                          uint64_t &EarlierSize, int64_t LaterStart,
                          uint64_t LaterSize, bool IsOverwriteEnd) {
-  auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
+  auto *EarlierIntrinsic = cast<AnyMemWriteIntrinsic>(EarlierWrite);
   Align PrefAlign = EarlierIntrinsic->getDestAlign().valueOrOne();
 
   // We assume that memset/memcpy operates in chunks of the "largest" native
@@ -1104,8 +1104,8 @@
     if (!I->mayWriteToMemory())
       return None;
 
-    if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
-      return {MemoryLocation::getForDest(MTI)};
+    if (auto *MWI = dyn_cast<AnyMemWriteIntrinsic>(I))
+      return {MemoryLocation::getForDest(MWI)};
 
     if (auto *CB = dyn_cast<CallBase>(I)) {
       // If the functions may write to memory we do not know about, bail out.
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -199,9 +199,9 @@
     return Res;
   }
 
-  static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
+  static AvailableValue getMI(MemWriteIntrinsic *MWI, unsigned Offset = 0) {
     AvailableValue Res;
-    Res.Val.setPointer(MI);
+    Res.Val.setPointer(MWI);
     Res.Val.setInt(MemIntrin);
     Res.Offset = Offset;
     return Res;
@@ -238,9 +238,9 @@
     return cast<LoadInst>(Val.getPointer());
   }
 
-  MemIntrinsic *getMemIntrinValue() const {
+  MemWriteIntrinsic *getMemIntrinValue() const {
     assert(isMemIntrinValue() && "Wrong accessor");
-    return cast<MemIntrinsic>(Val.getPointer());
+    return cast<MemWriteIntrinsic>(Val.getPointer());
   }
 
   /// Emit code at the specified insertion point to adjust the value defined
@@ -1061,7 +1061,7 @@
 
     // If the clobbering value is a memset/memcpy/memmove, see if we can
     // forward a value on from it.
-    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
+    if (MemWriteIntrinsic *DepMI = dyn_cast<MemWriteIntrinsic>(DepInst)) {
       if (Address && !Load->isAtomic()) {
         int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address,
                                                       DepMI, DL);
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -450,12 +450,12 @@
       PushPtrOperand(RMW->getPointerOperand());
     else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I))
       PushPtrOperand(CmpX->getPointerOperand());
-    else if (auto *MI = dyn_cast<MemIntrinsic>(&I)) {
+    else if (auto *MWI = dyn_cast<MemWriteIntrinsic>(&I)) {
       // For memset/memcpy/memmove, any pointer operand can be replaced.
-      PushPtrOperand(MI->getRawDest());
+      PushPtrOperand(MWI->getRawDest());
 
       // Handle 2nd operand for memcpy/memmove.
-      if (auto *MTI = dyn_cast<MemTransferInst>(MI))
+      if (auto *MTI = dyn_cast<MemTransferInst>(MWI))
         PushPtrOperand(MTI->getRawSource());
     } else if (auto *II = dyn_cast<IntrinsicInst>(&I))
       collectRewritableIntrinsicOperands(II, PostorderStack, Visited);
diff --git a/llvm/lib/Transforms/Scalar/NewGVN.cpp b/llvm/lib/Transforms/Scalar/NewGVN.cpp
--- a/llvm/lib/Transforms/Scalar/NewGVN.cpp
+++ b/llvm/lib/Transforms/Scalar/NewGVN.cpp
@@ -1468,7 +1468,7 @@
           return createConstantExpression(PossibleConstant);
         }
     }
-  } else if (auto *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
+  } else if (auto *DepMI = dyn_cast<MemWriteIntrinsic>(DepInst)) {
     int Offset = analyzeLoadFromClobberingMemInst(LoadType, LoadPtr, DepMI, DL);
     if (Offset >= 0) {
       if (auto *PossibleConstant =
diff --git a/llvm/lib/Transforms/Utils/VNCoercion.cpp b/llvm/lib/Transforms/Utils/VNCoercion.cpp
--- a/llvm/lib/Transforms/Utils/VNCoercion.cpp
+++ b/llvm/lib/Transforms/Utils/VNCoercion.cpp
@@ -365,29 +365,30 @@
 }
 
 int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
-                                     MemIntrinsic *MI, const DataLayout &DL) {
+                                     MemWriteIntrinsic *MWI,
+                                     const DataLayout &DL) {
   // If the mem operation has a non-constant size, we can't handle it.
-  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
+  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MWI->getLength());
   if (!SizeCst)
     return -1;
   uint64_t MemSizeInBits = SizeCst->getZExtValue() * 8;
 
   // If this is memset, we just need to see if the offset is valid in the size
   // of the memset.
-  if (MI->getIntrinsicID() == Intrinsic::memset) {
+  if (MWI->getIntrinsicID() == Intrinsic::memset) {
     if (DL.isNonIntegralPointerType(LoadTy->getScalarType())) {
-      auto *CI = dyn_cast<ConstantInt>(cast<MemSetInst>(MI)->getValue());
+      auto *CI = dyn_cast<ConstantInt>(cast<MemSetInst>(MWI)->getValue());
       if (!CI || !CI->isZero())
         return -1;
     }
-    return analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
+    return analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MWI->getDest(),
                                           MemSizeInBits, DL);
   }
 
   // If we have a memcpy/memmove, the only case we can handle is if this is a
   // copy from constant memory.  In that case, we can read directly from the
   // constant memory.
-  MemTransferInst *MTI = cast<MemTransferInst>(MI);
+  MemTransferInst *MTI = cast<MemTransferInst>(MWI);
 
   Constant *Src = dyn_cast<Constant>(MTI->getSource());
   if (!Src)
@@ -398,7 +399,7 @@
     return -1;
 
   // See if the access is within the bounds of the transfer.
-  int Offset = analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
+  int Offset = analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MWI->getDest(),
                                               MemSizeInBits, DL);
   if (Offset == -1)
     return Offset;
@@ -543,7 +544,7 @@
 }
 
 template <class T, class HelperClass>
-T *getMemInstValueForLoadHelper(MemIntrinsic *SrcInst, unsigned Offset,
+T *getMemInstValueForLoadHelper(MemWriteIntrinsic *SrcInst, unsigned Offset,
                                 Type *LoadTy, HelperClass &Helper,
                                 const DataLayout &DL) {
   LLVMContext &Ctx = LoadTy->getContext();
@@ -601,7 +602,7 @@
 
 /// This function is called when we have a
 /// memdep query of a load that ends up being a clobbering mem intrinsic.
-Value *getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
+Value *getMemInstValueForLoad(MemWriteIntrinsic *SrcInst, unsigned Offset,
                               Type *LoadTy, Instruction *InsertPt,
                               const DataLayout &DL) {
   IRBuilder<> Builder(InsertPt);
@@ -609,8 +610,9 @@
                                                           LoadTy, Builder, DL);
 }
 
-Constant *getConstantMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
-                                         Type *LoadTy, const DataLayout &DL) {
+Constant *getConstantMemInstValueForLoad(MemWriteIntrinsic *SrcInst,
+                                         unsigned Offset, Type *LoadTy,
+                                         const DataLayout &DL) {
   // The only case analyzeLoadFromClobberingMemInst cannot be converted to a
   // constant is when it's a memset of a non-constant.
   if (auto *MSI = dyn_cast<MemSetInst>(SrcInst))
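
Taken together, the retyped entry points form a two-step pipeline (analyze, then materialize), as GVN and NewGVN use them above. A hedged sketch of a caller:

```cpp
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
using namespace llvm;
using namespace llvm::VNCoercion;

// First compute the offset of the load within the clobbering write; on
// success, materialize the forwarded value right before the load.
static Value *forwardFromWrite(LoadInst *Load, Value *LoadPtr,
                               MemWriteIntrinsic *DepMWI,
                               const DataLayout &DL) {
  int Offset =
      analyzeLoadFromClobberingMemInst(Load->getType(), LoadPtr, DepMWI, DL);
  if (Offset < 0)
    return nullptr; // cannot forward
  return getMemInstValueForLoad(DepMWI, Offset, Load->getType(), Load, DL);
}
```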
diff --git a/polly/include/polly/Support/ScopHelper.h b/polly/include/polly/Support/ScopHelper.h
--- a/polly/include/polly/Support/ScopHelper.h
+++ b/polly/include/polly/Support/ScopHelper.h
@@ -113,20 +113,19 @@
 /// Utility proxy to wrap the common members of LoadInst and StoreInst.
 ///
 /// This works like the LLVM utility class CallSite, i.e. it forwards all calls
-/// to either a LoadInst, StoreInst, MemIntrinsic or MemTransferInst.
-/// It is similar to LLVM's utility classes IntrinsicInst, MemIntrinsic,
+/// to either a LoadInst, StoreInst, MemWriteIntrinsic or MemTransferInst.
+/// It is similar to LLVM's utility classes IntrinsicInst, MemWriteIntrinsic,
 /// MemTransferInst, etc. in that it offers a common interface, but does not act
 /// as a fake base class.
 /// It is similar to StringRef and ArrayRef in that it holds a pointer to the
 /// referenced object and should be passed by-value as it is small enough.
 ///
 /// This proxy can either represent a LoadInst instance, a StoreInst instance,
-/// a MemIntrinsic instance (memset, memmove, memcpy), a CallInst instance or a
-/// nullptr (only creatable using the default constructor); never an Instruction
-/// that is neither of the above mentioned. When representing a nullptr, only
-/// the following methods are defined:
-/// isNull(), isInstruction(), isLoad(), isStore(), ..., isMemTransferInst(),
-/// operator bool(), operator!()
+/// a MemWriteIntrinsic instance (memset, memmove, memcpy), a CallInst instance
+/// or a nullptr (only creatable using the default constructor); never an
+/// Instruction that is none of the above. When representing a nullptr, only
+/// the following methods are defined: isNull(), isInstruction(), isLoad(),
+/// isStore(), ..., isMemTransferInst(), operator bool(), operator!()
 ///
 /// The functions isa, cast, cast_or_null, dyn_cast are modeled to resemble
 /// those from llvm/Support/Casting.h. Partial template function specialization
@@ -144,18 +143,20 @@
   /* implicit */ MemAccInst(llvm::LoadInst *LI) : I(LI) {}
   /* implicit */ MemAccInst(llvm::StoreInst &SI) : I(&SI) {}
   /* implicit */ MemAccInst(llvm::StoreInst *SI) : I(SI) {}
-  /* implicit */ MemAccInst(llvm::MemIntrinsic *MI) : I(MI) {}
+  /* implicit */ MemAccInst(llvm::MemWriteIntrinsic *MWI) : I(MWI) {}
   /* implicit */ MemAccInst(llvm::CallInst *CI) : I(CI) {}
   explicit MemAccInst(llvm::Instruction &I) : I(&I) { assert(isa(I)); }
   explicit MemAccInst(llvm::Instruction *I) : I(I) { assert(isa(I)); }
 
   static bool isa(const llvm::Value &V) {
     return llvm::isa<llvm::LoadInst>(V) || llvm::isa<llvm::StoreInst>(V) ||
-           llvm::isa<llvm::CallInst>(V) || llvm::isa<llvm::MemIntrinsic>(V);
+           llvm::isa<llvm::CallInst>(V) ||
+           llvm::isa<llvm::MemWriteIntrinsic>(V);
   }
   static bool isa(const llvm::Value *V) {
     return llvm::isa<llvm::LoadInst>(V) || llvm::isa<llvm::StoreInst>(V) ||
-           llvm::isa<llvm::CallInst>(V) || llvm::isa<llvm::MemIntrinsic>(V);
+           llvm::isa<llvm::CallInst>(V) ||
+           llvm::isa<llvm::MemWriteIntrinsic>(V);
   }
   static MemAccInst cast(llvm::Value &V) {
     return MemAccInst(llvm::cast<llvm::Instruction>(V));
@@ -203,12 +204,12 @@
     I = SI;
     return *this;
   }
-  MemAccInst &operator=(llvm::MemIntrinsic &MI) {
+  MemAccInst &operator=(llvm::MemWriteIntrinsic &MI) {
     I = &MI;
     return *this;
   }
-  MemAccInst &operator=(llvm::MemIntrinsic *MI) {
-    I = MI;
+  MemAccInst &operator=(llvm::MemWriteIntrinsic *MWI) {
+    I = MWI;
     return *this;
   }
   MemAccInst &operator=(llvm::CallInst &CI) {
@@ -235,7 +236,7 @@
       return asLoad();
     if (isStore())
       return asStore()->getValueOperand();
-    if (isMemIntrinsic())
+    if (isMemWriteIntrinsic())
       return nullptr;
     if (isCallInst())
       return nullptr;
@@ -246,8 +247,8 @@
       return asLoad()->getPointerOperand();
     if (isStore())
       return asStore()->getPointerOperand();
-    if (isMemIntrinsic())
-      return asMemIntrinsic()->getRawDest();
+    if (isMemWriteIntrinsic())
+      return asMemWriteIntrinsic()->getRawDest();
     if (isCallInst())
       return nullptr;
     llvm_unreachable("Operation not supported on nullptr");
@@ -261,8 +262,8 @@
     if (isMemTransferInst())
       return std::min(asMemTransferInst()->getDestAlignment(),
                       asMemTransferInst()->getSourceAlignment());
-    if (isMemIntrinsic())
-      return asMemIntrinsic()->getDestAlignment();
+    if (isMemWriteIntrinsic())
+      return asMemWriteIntrinsic()->getDestAlignment();
     if (isCallInst())
       return 0;
     llvm_unreachable("Operation not supported on nullptr");
@@ -272,8 +273,8 @@
       return asLoad()->isVolatile();
     if (isStore())
       return asStore()->isVolatile();
-    if (isMemIntrinsic())
-      return asMemIntrinsic()->isVolatile();
+    if (isMemWriteIntrinsic())
+      return asMemWriteIntrinsic()->isVolatile();
     if (isCallInst())
       return false;
     llvm_unreachable("Operation not supported on nullptr");
@@ -283,8 +284,8 @@
       return asLoad()->isSimple();
     if (isStore())
       return asStore()->isSimple();
-    if (isMemIntrinsic())
-      return !asMemIntrinsic()->isVolatile();
+    if (isMemWriteIntrinsic())
+      return !asMemWriteIntrinsic()->isVolatile();
     if (isCallInst())
       return true;
     llvm_unreachable("Operation not supported on nullptr");
@@ -294,7 +295,7 @@
       return asLoad()->getOrdering();
     if (isStore())
       return asStore()->getOrdering();
-    if (isMemIntrinsic())
+    if (isMemWriteIntrinsic())
       return llvm::AtomicOrdering::NotAtomic;
     if (isCallInst())
       return llvm::AtomicOrdering::NotAtomic;
@@ -306,8 +307,8 @@
     if (isStore())
       return asStore()->isUnordered();
     // Copied from the Load/Store implementation of isUnordered:
-    if (isMemIntrinsic())
-      return !asMemIntrinsic()->isVolatile();
+    if (isMemWriteIntrinsic())
+      return !asMemWriteIntrinsic()->isVolatile();
     if (isCallInst())
       return true;
     llvm_unreachable("Operation not supported on nullptr");
@@ -322,7 +323,9 @@
   bool isLoad() const { return I && llvm::isa<llvm::LoadInst>(I); }
   bool isStore() const { return I && llvm::isa<llvm::StoreInst>(I); }
   bool isCallInst() const { return I && llvm::isa<llvm::CallInst>(I); }
-  bool isMemIntrinsic() const { return I && llvm::isa<llvm::MemIntrinsic>(I); }
+  bool isMemWriteIntrinsic() const {
+    return I && llvm::isa<llvm::MemWriteIntrinsic>(I);
+  }
   bool isMemSetInst() const { return I && llvm::isa<llvm::MemSetInst>(I); }
   bool isMemTransferInst() const {
     return I && llvm::isa<llvm::MemTransferInst>(I);
@@ -331,8 +334,8 @@
   llvm::LoadInst *asLoad() const { return llvm::cast<llvm::LoadInst>(I); }
   llvm::StoreInst *asStore() const { return llvm::cast<llvm::StoreInst>(I); }
   llvm::CallInst *asCallInst() const { return llvm::cast<llvm::CallInst>(I); }
-  llvm::MemIntrinsic *asMemIntrinsic() const {
-    return llvm::cast<llvm::MemIntrinsic>(I);
+  llvm::MemWriteIntrinsic *asMemWriteIntrinsic() const {
+    return llvm::cast<llvm::MemWriteIntrinsic>(I);
   }
   llvm::MemSetInst *asMemSetInst() const {
     return llvm::cast<llvm::MemSetInst>(I);
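
A short usage sketch of the renamed proxy queries (the helper is illustrative, not from the patch):

```cpp
#include "polly/Support/ScopHelper.h"

// MemAccInst is passed by value; after this rename the write-intrinsic
// queries spell out "Write".
static bool writesThroughDest(polly::MemAccInst Acc, const llvm::Value *Ptr) {
  return Acc.isMemWriteIntrinsic() &&
         Acc.asMemWriteIntrinsic()->getRawDest() == Ptr;
}
```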
diff --git a/polly/lib/Analysis/ScopBuilder.cpp b/polly/lib/Analysis/ScopBuilder.cpp
--- a/polly/lib/Analysis/ScopBuilder.cpp
+++ b/polly/lib/Analysis/ScopBuilder.cpp
@@ -1751,13 +1751,13 @@
 }
 
 bool ScopBuilder::buildAccessMemIntrinsic(MemAccInst Inst, ScopStmt *Stmt) {
-  auto *MemIntr = dyn_cast_or_null<MemIntrinsic>(Inst);
+  auto *MemWriteIntr = dyn_cast_or_null<MemWriteIntrinsic>(Inst);
 
-  if (MemIntr == nullptr)
+  if (MemWriteIntr == nullptr)
     return false;
 
   auto *L = LI.getLoopFor(Inst->getParent());
-  auto *LengthVal = SE.getSCEVAtScope(MemIntr->getLength(), L);
+  auto *LengthVal = SE.getSCEVAtScope(MemWriteIntr->getLength(), L);
   assert(LengthVal);
 
   // Check if the length val is actually affine or if we overapproximate it
@@ -1773,7 +1773,7 @@
   if (!LengthIsAffine)
     LengthVal = nullptr;
 
-  auto *DestPtrVal = MemIntr->getDest();
+  auto *DestPtrVal = MemWriteIntr->getDest();
   assert(DestPtrVal);
 
   auto *DestAccFunc = SE.getSCEVAtScope(DestPtrVal, L);
@@ -1799,7 +1799,7 @@
                  LengthIsAffine, {DestAccFunc, LengthVal}, {nullptr},
                  Inst.getValueOperand());
 
-  auto *MemTrans = dyn_cast<MemTransferInst>(MemIntr);
+  auto *MemTrans = dyn_cast<MemTransferInst>(MemWriteIntr);
   if (!MemTrans)
     return true;
 
diff --git a/polly/lib/Analysis/ScopDetection.cpp b/polly/lib/Analysis/ScopDetection.cpp
--- a/polly/lib/Analysis/ScopDetection.cpp
+++ b/polly/lib/Analysis/ScopDetection.cpp
@@ -787,7 +787,7 @@
     }
     LLVM_FALLTHROUGH;
   case Intrinsic::memset:
-    AF = SE.getSCEVAtScope(cast<MemIntrinsic>(II).getDest(), L);
+    AF = SE.getSCEVAtScope(cast<MemWriteIntrinsic>(II).getDest(), L);
     if (!AF->isZero()) {
       BP = dyn_cast<SCEVUnknown>(SE.getPointerBase(AF));
       // Bail if the destination pointer is not valid.