Index: include/llvm-c/Core.h
===================================================================
--- include/llvm-c/Core.h
+++ include/llvm-c/Core.h
@@ -1204,10 +1204,10 @@
       macro(IntrinsicInst)                  \
         macro(DbgInfoIntrinsic)             \
           macro(DbgDeclareInst)             \
-        macro(MemIntrinsic)                 \
-          macro(MemCpyInst)                 \
-          macro(MemMoveInst)                \
-          macro(MemSetInst)                 \
+        macro(PlainMemIntrinsic)            \
+          macro(PlainMemCpyInst)            \
+          macro(PlainMemMoveInst)           \
+          macro(PlainMemSetInst)            \
       macro(CmpInst)                        \
         macro(FCmpInst)                     \
         macro(ICmpInst)                     \
Index: include/llvm/Analysis/AliasSetTracker.h
===================================================================
--- include/llvm/Analysis/AliasSetTracker.h
+++ include/llvm/Analysis/AliasSetTracker.h
@@ -37,8 +37,8 @@
 class AliasSetTracker;
 class BasicBlock;
 class LoadInst;
-class MemSetInst;
-class MemTransferInst;
+class PlainMemSetInst;
+class PlainMemTransferInst;
 class raw_ostream;
 class StoreInst;
 class VAArgInst;
@@ -368,8 +368,8 @@
   void add(LoadInst *LI);
   void add(StoreInst *SI);
   void add(VAArgInst *VAAI);
-  void add(MemSetInst *MSI);
-  void add(MemTransferInst *MTI);
+  void add(PlainMemSetInst *MSI);
+  void add(PlainMemTransferInst *MTI);
   void add(Instruction *I);       // Dispatch to one of the other add methods...
   void add(BasicBlock &BB);       // Add all instructions in basic block
   void add(const AliasSetTracker &AST); // Add alias relations from another AST
Index: include/llvm/Analysis/MemoryLocation.h
===================================================================
--- include/llvm/Analysis/MemoryLocation.h
+++ include/llvm/Analysis/MemoryLocation.h
@@ -25,8 +25,8 @@
 class LoadInst;
 class StoreInst;
-class MemTransferInst;
-class MemIntrinsic;
+class PlainMemTransferInst;
+class PlainMemIntrinsic;
 class TargetLibraryInfo;

 /// Representation for a specific memory location.
@@ -89,11 +89,11 @@
   }

   /// Return a location representing the source of a memory transfer.
-  static MemoryLocation getForSource(const MemTransferInst *MTI);
+  static MemoryLocation getForSource(const PlainMemTransferInst *MTI);

   /// Return a location representing the destination of a memory set or
   /// transfer.
-  static MemoryLocation getForDest(const MemIntrinsic *MI);
+  static MemoryLocation getForDest(const PlainMemIntrinsic *MI);

   /// Return a location representing a particular argument of a call.
   static MemoryLocation getForArgument(ImmutableCallSite CS, unsigned ArgIdx,
Index: include/llvm/Analysis/PtrUseVisitor.h
===================================================================
--- include/llvm/Analysis/PtrUseVisitor.h
+++ include/llvm/Analysis/PtrUseVisitor.h
@@ -278,7 +278,7 @@
   // No-op intrinsics which we know don't escape the pointer to to logic in
   // some other function.
void visitDbgInfoIntrinsic(DbgInfoIntrinsic &I) {} - void visitMemIntrinsic(MemIntrinsic &I) {} + void visitPlainMemIntrinsic(PlainMemIntrinsic &I) {} void visitIntrinsicInst(IntrinsicInst &II) { switch (II.getIntrinsicID()) { default: Index: include/llvm/IR/InstVisitor.h =================================================================== --- include/llvm/IR/InstVisitor.h +++ include/llvm/IR/InstVisitor.h @@ -214,11 +214,11 @@ RetTy visitDbgDeclareInst(DbgDeclareInst &I) { DELEGATE(DbgInfoIntrinsic);} RetTy visitDbgValueInst(DbgValueInst &I) { DELEGATE(DbgInfoIntrinsic);} RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I) { DELEGATE(IntrinsicInst); } - RetTy visitMemSetInst(MemSetInst &I) { DELEGATE(MemIntrinsic); } - RetTy visitMemCpyInst(MemCpyInst &I) { DELEGATE(MemTransferInst); } - RetTy visitMemMoveInst(MemMoveInst &I) { DELEGATE(MemTransferInst); } - RetTy visitMemTransferInst(MemTransferInst &I) { DELEGATE(MemIntrinsic); } - RetTy visitMemIntrinsic(MemIntrinsic &I) { DELEGATE(IntrinsicInst); } + RetTy visitPlainMemSetInst(PlainMemSetInst &I) { DELEGATE(PlainMemIntrinsic); } + RetTy visitPlainMemCpyInst(PlainMemCpyInst &I) { DELEGATE(PlainMemTransferInst); } + RetTy visitPlainMemMoveInst(PlainMemMoveInst &I) { DELEGATE(PlainMemTransferInst); } + RetTy visitPlainMemTransferInst(PlainMemTransferInst &I) { DELEGATE(PlainMemIntrinsic); } + RetTy visitPlainMemIntrinsic(PlainMemIntrinsic &I) { DELEGATE(IntrinsicInst); } RetTy visitVAStartInst(VAStartInst &I) { DELEGATE(IntrinsicInst); } RetTy visitVAEndInst(VAEndInst &I) { DELEGATE(IntrinsicInst); } RetTy visitVACopyInst(VACopyInst &I) { DELEGATE(IntrinsicInst); } @@ -272,9 +272,9 @@ default: DELEGATE(IntrinsicInst); case Intrinsic::dbg_declare: DELEGATE(DbgDeclareInst); case Intrinsic::dbg_value: DELEGATE(DbgValueInst); - case Intrinsic::memcpy: DELEGATE(MemCpyInst); - case Intrinsic::memmove: DELEGATE(MemMoveInst); - case Intrinsic::memset: DELEGATE(MemSetInst); + case Intrinsic::memcpy: DELEGATE(PlainMemCpyInst); + case Intrinsic::memmove: DELEGATE(PlainMemMoveInst); + case Intrinsic::memset: DELEGATE(PlainMemSetInst); case Intrinsic::vastart: DELEGATE(VAStartInst); case Intrinsic::vaend: DELEGATE(VAEndInst); case Intrinsic::vacopy: DELEGATE(VACopyInst); Index: include/llvm/IR/IntrinsicInst.h =================================================================== --- include/llvm/IR/IntrinsicInst.h +++ include/llvm/IR/IntrinsicInst.h @@ -11,7 +11,7 @@ // functions with the isa/dyncast family of functions. In particular, this // allows you to do things like: // -// if (MemCpyInst *MCI = dyn_cast(Inst)) +// if (PlainMemCpyInst *MCI = dyn_cast(Inst)) // ... MCI->getDest() ... MCI->getSource() ... 
// // All intrinsic function calls are instances of the call instruction, so these @@ -214,9 +214,9 @@ }; /// This class represents atomic memcpy intrinsic - /// TODO: Integrate this class into MemIntrinsic hierarchy; for now this is + /// TODO: Integrate this class into PlainMemIntrinsic hierarchy; for now this is /// C&P of all methods from that hierarchy - class ElementUnorderedAtomicMemCpyInst : public IntrinsicInst { + class AtomicMemCpyInst : public IntrinsicInst { private: enum { ARG_DEST = 0, ARG_SOURCE = 1, ARG_LENGTH = 2, ARG_ELEMENTSIZE = 3 }; @@ -305,7 +305,7 @@ } }; - class ElementUnorderedAtomicMemMoveInst : public IntrinsicInst { + class AtomicMemMoveInst : public IntrinsicInst { private: enum { ARG_DEST = 0, ARG_SOURCE = 1, ARG_LENGTH = 2, ARG_ELEMENTSIZE = 3 }; @@ -395,9 +395,9 @@ }; /// This class represents atomic memset intrinsic - /// TODO: Integrate this class into MemIntrinsic hierarchy; for now this is + /// TODO: Integrate this class into PlainMemIntrinsic hierarchy; for now this is /// C&P of all methods from that hierarchy - class ElementUnorderedAtomicMemSetInst : public IntrinsicInst { + class AtomicMemSetInst : public IntrinsicInst { private: enum { ARG_DEST = 0, ARG_VALUE = 1, ARG_LENGTH = 2, ARG_ELEMENTSIZE = 3 }; @@ -475,7 +475,7 @@ }; /// This is the common base class for memset/memcpy/memmove. - class MemIntrinsic : public IntrinsicInst { + class PlainMemIntrinsic : public IntrinsicInst { public: Value *getRawDest() const { return const_cast(getArgOperand(0)); } const Use &getRawDestUse() const { return getArgOperandUse(0); } @@ -551,7 +551,7 @@ }; /// This class wraps the llvm.memset intrinsic. - class MemSetInst : public MemIntrinsic { + class PlainMemSetInst : public PlainMemIntrinsic { public: /// Return the arguments to the instruction. Value *getValue() const { return const_cast(getArgOperand(1)); } @@ -574,7 +574,7 @@ }; /// This class wraps the llvm.memcpy/memmove intrinsics. - class MemTransferInst : public MemIntrinsic { + class PlainMemTransferInst : public PlainMemIntrinsic { public: /// Return the arguments to the instruction. Value *getRawSource() const { return const_cast(getArgOperand(1)); } @@ -607,7 +607,7 @@ }; /// This class wraps the llvm.memcpy intrinsic. - class MemCpyInst : public MemTransferInst { + class PlainMemCpyInst : public PlainMemTransferInst { public: // Methods for support type inquiry through isa, cast, and dyn_cast: static bool classof(const IntrinsicInst *I) { @@ -619,7 +619,7 @@ }; /// This class wraps the llvm.memmove intrinsic. - class MemMoveInst : public MemTransferInst { + class PlainMemMoveInst : public PlainMemTransferInst { public: // Methods for support type inquiry through isa, cast, and dyn_cast: static bool classof(const IntrinsicInst *I) { Index: include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h =================================================================== --- include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h +++ include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h @@ -37,7 +37,7 @@ // destination. If we have a new alignment for only one operand of a transfer // instruction, save it in these maps. If we reach the other operand through // another assumption later, then we may change the alignment at that point. 
- DenseMap NewDestAlignments, NewSrcAlignments; + DenseMap NewDestAlignments, NewSrcAlignments; ScalarEvolution *SE = nullptr; DominatorTree *DT = nullptr; Index: include/llvm/Transforms/Scalar/MemCpyOptimizer.h =================================================================== --- include/llvm/Transforms/Scalar/MemCpyOptimizer.h +++ include/llvm/Transforms/Scalar/MemCpyOptimizer.h @@ -51,14 +51,14 @@ private: // Helper functions bool processStore(StoreInst *SI, BasicBlock::iterator &BBI); - bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI); - bool processMemCpy(MemCpyInst *M); - bool processMemMove(MemMoveInst *M); + bool processMemSet(PlainMemSetInst *SI, BasicBlock::iterator &BBI); + bool processMemCpy(PlainMemCpyInst *M); + bool processMemMove(PlainMemMoveInst *M); bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc, uint64_t cpyLen, unsigned cpyAlign, CallInst *C); - bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep); - bool processMemSetMemCpyDependence(MemCpyInst *M, MemSetInst *MDep); - bool performMemCpyToMemSetOptzn(MemCpyInst *M, MemSetInst *MDep); + bool processMemCpyMemCpyDependence(PlainMemCpyInst *M, PlainMemCpyInst *MDep); + bool processMemSetMemCpyDependence(PlainMemCpyInst *M, PlainMemSetInst *MDep); + bool performMemCpyToMemSetOptzn(PlainMemCpyInst *M, PlainMemSetInst *MDep); bool processByValArgument(CallSite CS, unsigned ArgNo); Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr, Value *ByteVal); Index: include/llvm/Transforms/Utils/LowerMemIntrinsics.h =================================================================== --- include/llvm/Transforms/Utils/LowerMemIntrinsics.h +++ include/llvm/Transforms/Utils/LowerMemIntrinsics.h @@ -19,9 +19,9 @@ class ConstantInt; class Instruction; -class MemCpyInst; -class MemMoveInst; -class MemSetInst; +class PlainMemCpyInst; +class PlainMemMoveInst; +class PlainMemSetInst; class TargetTransformInfo; class Value; @@ -49,13 +49,13 @@ /// Expand \p MemCpy as a loop. \p MemCpy is not deleted. -void expandMemCpyAsLoop(MemCpyInst *MemCpy, const TargetTransformInfo &TTI); +void expandMemCpyAsLoop(PlainMemCpyInst *MemCpy, const TargetTransformInfo &TTI); /// Expand \p MemMove as a loop. \p MemMove is not deleted. -void expandMemMoveAsLoop(MemMoveInst *MemMove); +void expandMemMoveAsLoop(PlainMemMoveInst *MemMove); /// Expand \p MemSet as a loop. \p MemSet is not deleted. -void expandMemSetAsLoop(MemSetInst *MemSet); +void expandMemSetAsLoop(PlainMemSetInst *MemSet); } // End llvm namespace Index: include/llvm/Transforms/Utils/VNCoercion.h =================================================================== --- include/llvm/Transforms/Utils/VNCoercion.h +++ include/llvm/Transforms/Utils/VNCoercion.h @@ -27,7 +27,7 @@ class Function; class StoreInst; class LoadInst; -class MemIntrinsic; +class PlainMemIntrinsic; class Instruction; class Value; class Type; @@ -69,7 +69,7 @@ /// On success, it returns the offset into DepMI that extraction would start. /// On failure, it returns -1. int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, - MemIntrinsic *DepMI, const DataLayout &DL); + PlainMemIntrinsic *DepMI, const DataLayout &DL); /// If analyzeLoadFromClobberingStore returned an offset, this function can be /// used to actually perform the extraction of the bits from the store. It @@ -96,12 +96,12 @@ /// used to actually perform the extraction of the bits from the memory /// intrinsic. 
It inserts instructions to do so at InsertPt, and returns the /// extracted value. -Value *getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, +Value *getMemInstValueForLoad(PlainMemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, Instruction *InsertPt, const DataLayout &DL); // This is the same as getStoreValueForLoad, except it performs no insertion. // It returns nullptr if it cannot produce a constant. -Constant *getConstantMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, +Constant *getConstantMemInstValueForLoad(PlainMemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, const DataLayout &DL); } } Index: lib/Analysis/AliasSetTracker.cpp =================================================================== --- lib/Analysis/AliasSetTracker.cpp +++ lib/Analysis/AliasSetTracker.cpp @@ -386,7 +386,7 @@ AliasSet::ModRefAccess); } -void AliasSetTracker::add(MemSetInst *MSI) { +void AliasSetTracker::add(PlainMemSetInst *MSI) { AAMDNodes AAInfo; MSI->getAAMetadata(AAInfo); @@ -403,7 +403,7 @@ AS.setVolatile(); } -void AliasSetTracker::add(MemTransferInst *MTI) { +void AliasSetTracker::add(PlainMemTransferInst *MTI) { AAMDNodes AAInfo; MTI->getAAMetadata(AAInfo); @@ -460,9 +460,9 @@ return add(SI); if (VAArgInst *VAAI = dyn_cast(I)) return add(VAAI); - if (MemSetInst *MSI = dyn_cast(I)) + if (PlainMemSetInst *MSI = dyn_cast(I)) return add(MSI); - if (MemTransferInst *MTI = dyn_cast(I)) + if (PlainMemTransferInst *MTI = dyn_cast(I)) return add(MTI); return addUnknown(I); } Index: lib/Analysis/BasicAliasAnalysis.cpp =================================================================== --- lib/Analysis/BasicAliasAnalysis.cpp +++ lib/Analysis/BasicAliasAnalysis.cpp @@ -841,7 +841,7 @@ // operands, i.e., source and destination of any given memcpy must no-alias. // If Loc must-aliases either one of these two locations, then it necessarily // no-aliases the other. - if (auto *Inst = dyn_cast(CS.getInstruction())) { + if (auto *Inst = dyn_cast(CS.getInstruction())) { AliasResult SrcAA, DestAA; if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst), Index: lib/Analysis/CaptureTracking.cpp =================================================================== --- lib/Analysis/CaptureTracking.cpp +++ lib/Analysis/CaptureTracking.cpp @@ -245,7 +245,7 @@ // Volatile operations effectively capture the memory location that they // load and store to. - if (auto *MI = dyn_cast(I)) + if (auto *MI = dyn_cast(I)) if (MI->isVolatile()) if (Tracker->captured(U)) return; Index: lib/Analysis/LazyValueInfo.cpp =================================================================== --- lib/Analysis/LazyValueInfo.cpp +++ lib/Analysis/LazyValueInfo.cpp @@ -637,7 +637,7 @@ GetUnderlyingObject(S->getPointerOperand(), S->getModule()->getDataLayout()) == Ptr; } - if (MemIntrinsic *MI = dyn_cast(I)) { + if (PlainMemIntrinsic *MI = dyn_cast(I)) { if (MI->isVolatile()) return false; // FIXME: check whether it has a valuerange that excludes zero? 
@@ -648,7 +648,7 @@ if (GetUnderlyingObject(MI->getRawDest(), MI->getModule()->getDataLayout()) == Ptr) return true; - if (MemTransferInst *MTI = dyn_cast(MI)) + if (PlainMemTransferInst *MTI = dyn_cast(MI)) if (MTI->getSourceAddressSpace() == 0) if (GetUnderlyingObject(MTI->getRawSource(), MTI->getModule()->getDataLayout()) == Ptr) Index: lib/Analysis/Lint.cpp =================================================================== --- lib/Analysis/Lint.cpp +++ lib/Analysis/Lint.cpp @@ -303,7 +303,7 @@ // TODO: Check more intrinsics case Intrinsic::memcpy: { - MemCpyInst *MCI = cast(&I); + PlainMemCpyInst *MCI = cast(&I); // TODO: If the size is known, use it. visitMemoryReference(I, MCI->getDest(), MemoryLocation::UnknownSize, MCI->getAlignment(), nullptr, MemRef::Write); @@ -325,7 +325,7 @@ break; } case Intrinsic::memmove: { - MemMoveInst *MMI = cast(&I); + PlainMemMoveInst *MMI = cast(&I); // TODO: If the size is known, use it. visitMemoryReference(I, MMI->getDest(), MemoryLocation::UnknownSize, MMI->getAlignment(), nullptr, MemRef::Write); @@ -334,7 +334,7 @@ break; } case Intrinsic::memset: { - MemSetInst *MSI = cast(&I); + PlainMemSetInst *MSI = cast(&I); // TODO: If the size is known, use it. visitMemoryReference(I, MSI->getDest(), MemoryLocation::UnknownSize, MSI->getAlignment(), nullptr, MemRef::Write); Index: lib/Analysis/MemoryLocation.cpp =================================================================== --- lib/Analysis/MemoryLocation.cpp +++ lib/Analysis/MemoryLocation.cpp @@ -64,7 +64,7 @@ AATags); } -MemoryLocation MemoryLocation::getForSource(const MemTransferInst *MTI) { +MemoryLocation MemoryLocation::getForSource(const PlainMemTransferInst *MTI) { uint64_t Size = UnknownSize; if (ConstantInt *C = dyn_cast(MTI->getLength())) Size = C->getValue().getZExtValue(); @@ -77,7 +77,7 @@ return MemoryLocation(MTI->getRawSource(), Size, AATags); } -MemoryLocation MemoryLocation::getForDest(const MemIntrinsic *MTI) { +MemoryLocation MemoryLocation::getForDest(const PlainMemIntrinsic *MTI) { uint64_t Size = UnknownSize; if (ConstantInt *C = dyn_cast(MTI->getLength())) Size = C->getValue().getZExtValue(); Index: lib/Analysis/ValueTracking.cpp =================================================================== --- lib/Analysis/ValueTracking.cpp +++ lib/Analysis/ValueTracking.cpp @@ -3835,7 +3835,7 @@ return !CXI->isVolatile(); if (const AtomicRMWInst *RMWI = dyn_cast(I)) return !RMWI->isVolatile(); - if (const MemIntrinsic *MII = dyn_cast(I)) + if (const PlainMemIntrinsic *MII = dyn_cast(I)) return !MII->isVolatile(); // If there is no successor, then execution can't transfer to it. 
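Note for out-of-tree users: passes keyed on the old InstVisitor hooks need the same mechanical rename. A minimal sketch of a client written against the renamed hierarchy above (the MemIntrinsicCounter visitor is hypothetical and not part of this patch):

  #include "llvm/IR/InstVisitor.h"
  #include "llvm/IR/IntrinsicInst.h"
  using namespace llvm;

  struct MemIntrinsicCounter : InstVisitor<MemIntrinsicCounter> {
    unsigned NumPlainMemOps = 0;
    // After this patch the hook is visitPlainMemIntrinsic; it still matches
    // only the non-atomic memset/memcpy/memmove intrinsics.
    void visitPlainMemIntrinsic(PlainMemIntrinsic &MI) {
      if (auto *MTI = dyn_cast<PlainMemTransferInst>(&MI))
        (void)MTI->getRawSource(); // memcpy/memmove also expose a source operand.
      ++NumPlainMemOps;
    }
    // Usage: MemIntrinsicCounter C; C.visit(F);
  };
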
Index: lib/CodeGen/CodeGenPrepare.cpp =================================================================== --- lib/CodeGen/CodeGenPrepare.cpp +++ lib/CodeGen/CodeGenPrepare.cpp @@ -2421,9 +2421,9 @@ } // If this is a memcpy (or similar) then we may be able to improve the // alignment - if (MemIntrinsic *MI = dyn_cast(CI)) { + if (PlainMemIntrinsic *MI = dyn_cast(CI)) { unsigned Align = getKnownAlignment(MI->getDest(), *DL); - if (MemTransferInst *MTI = dyn_cast(MI)) + if (PlainMemTransferInst *MTI = dyn_cast(MI)) Align = std::min(Align, getKnownAlignment(MTI->getSource(), *DL)); if (Align > MI->getAlignment()) MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align)); Index: lib/CodeGen/SafeStack.cpp =================================================================== --- lib/CodeGen/SafeStack.cpp +++ lib/CodeGen/SafeStack.cpp @@ -186,7 +186,7 @@ bool IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize); - bool IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U, + bool IsMemIntrinsicSafe(const PlainMemIntrinsic *MI, const Use &U, const Value *AllocaPtr, uint64_t AllocaSize); bool IsAccessSafe(Value *Addr, uint64_t Size, const Value *AllocaPtr, uint64_t AllocaSize); @@ -244,7 +244,7 @@ return Safe; } -bool SafeStack::IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U, +bool SafeStack::IsMemIntrinsicSafe(const PlainMemIntrinsic *MI, const Use &U, const Value *AllocaPtr, uint64_t AllocaSize) { // All MemIntrinsics have destination address in Arg0 and size in Arg2. @@ -310,7 +310,7 @@ continue; } - if (const MemIntrinsic *MI = dyn_cast(I)) { + if (const PlainMemIntrinsic *MI = dyn_cast(I)) { if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) { DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr << "\n unsafe memintrinsic: " << *I Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp =================================================================== --- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -5032,8 +5032,8 @@ return nullptr; } case Intrinsic::memcpy_element_unordered_atomic: { - const ElementUnorderedAtomicMemCpyInst &MI = - cast(I); + const AtomicMemCpyInst &MI = + cast(I); SDValue Dst = getValue(MI.getRawDest()); SDValue Src = getValue(MI.getRawSource()); SDValue Length = getValue(MI.getLength()); @@ -5071,7 +5071,7 @@ return nullptr; } case Intrinsic::memmove_element_unordered_atomic: { - auto &MI = cast(I); + auto &MI = cast(I); SDValue Dst = getValue(MI.getRawDest()); SDValue Src = getValue(MI.getRawSource()); SDValue Length = getValue(MI.getLength()); @@ -5109,7 +5109,7 @@ return nullptr; } case Intrinsic::memset_element_unordered_atomic: { - auto &MI = cast(I); + auto &MI = cast(I); SDValue Dst = getValue(MI.getRawDest()); SDValue Val = getValue(MI.getValue()); SDValue Length = getValue(MI.getLength()); Index: lib/IR/Verifier.cpp =================================================================== --- lib/IR/Verifier.cpp +++ lib/IR/Verifier.cpp @@ -4025,8 +4025,8 @@ break; } case Intrinsic::memcpy_element_unordered_atomic: { - const ElementUnorderedAtomicMemCpyInst *MI = - cast(CS.getInstruction()); + const AtomicMemCpyInst *MI = + cast(CS.getInstruction()); ; ConstantInt *ElementSizeCI = @@ -4062,7 +4062,7 @@ break; } case Intrinsic::memmove_element_unordered_atomic: { - auto *MI = cast(CS.getInstruction()); + auto *MI = cast(CS.getInstruction()); ConstantInt *ElementSizeCI = dyn_cast(MI->getRawElementSizeInBytes()); @@ -4097,7 +4097,7 @@ break; } case 
Intrinsic::memset_element_unordered_atomic: { - auto *MI = cast(CS.getInstruction()); + auto *MI = cast(CS.getInstruction()); ConstantInt *ElementSizeCI = dyn_cast(MI->getRawElementSizeInBytes()); Index: lib/Target/AArch64/AArch64FastISel.cpp =================================================================== --- lib/Target/AArch64/AArch64FastISel.cpp +++ lib/Target/AArch64/AArch64FastISel.cpp @@ -3444,7 +3444,7 @@ } case Intrinsic::memcpy: case Intrinsic::memmove: { - const auto *MTI = cast(II); + const auto *MTI = cast(II); // Don't handle volatile. if (MTI->isVolatile()) return false; @@ -3475,11 +3475,11 @@ // address spaces. return false; - const char *IntrMemName = isa(II) ? "memcpy" : "memmove"; + const char *IntrMemName = isa(II) ? "memcpy" : "memmove"; return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2); } case Intrinsic::memset: { - const MemSetInst *MSI = cast(II); + const PlainMemSetInst *MSI = cast(II); // Don't handle volatile. if (MSI->isVolatile()) return false; Index: lib/Target/AMDGPU/AMDGPULowerIntrinsics.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPULowerIntrinsics.cpp +++ lib/Target/AMDGPU/AMDGPULowerIntrinsics.cpp @@ -71,7 +71,7 @@ switch (ID) { case Intrinsic::memcpy: { - auto *Memcpy = cast(Inst); + auto *Memcpy = cast(Inst); if (shouldExpandOperationWithSize(Memcpy->getLength())) { Function *ParentFunc = Memcpy->getParent()->getParent(); const TargetTransformInfo &TTI = @@ -84,7 +84,7 @@ break; } case Intrinsic::memmove: { - auto *Memmove = cast(Inst); + auto *Memmove = cast(Inst); if (shouldExpandOperationWithSize(Memmove->getLength())) { expandMemMoveAsLoop(Memmove); Changed = true; @@ -94,7 +94,7 @@ break; } case Intrinsic::memset: { - auto *Memset = cast(Inst); + auto *Memset = cast(Inst); if (shouldExpandOperationWithSize(Memset->getLength())) { expandMemSetAsLoop(Memset); Changed = true; Index: lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp +++ lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp @@ -842,7 +842,7 @@ Intr->eraseFromParent(); continue; case Intrinsic::memcpy: { - MemCpyInst *MemCpy = cast(Intr); + PlainMemCpyInst *MemCpy = cast(Intr); Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getRawSource(), MemCpy->getLength(), MemCpy->getAlignment(), MemCpy->isVolatile()); @@ -850,7 +850,7 @@ continue; } case Intrinsic::memmove: { - MemMoveInst *MemMove = cast(Intr); + PlainMemMoveInst *MemMove = cast(Intr); Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getRawSource(), MemMove->getLength(), MemMove->getAlignment(), MemMove->isVolatile()); @@ -858,7 +858,7 @@ continue; } case Intrinsic::memset: { - MemSetInst *MemSet = cast(Intr); + PlainMemSetInst *MemSet = cast(Intr); Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(), MemSet->getLength(), MemSet->getAlignment(), MemSet->isVolatile()); Index: lib/Target/ARM/ARMFastISel.cpp =================================================================== --- lib/Target/ARM/ARMFastISel.cpp +++ lib/Target/ARM/ARMFastISel.cpp @@ -2529,7 +2529,7 @@ } case Intrinsic::memcpy: case Intrinsic::memmove: { - const MemTransferInst &MTI = cast(I); + const PlainMemTransferInst &MTI = cast(I); // Don't handle volatile. if (MTI.isVolatile()) return false; @@ -2558,11 +2558,11 @@ if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255) return false; - const char *IntrMemName = isa(I) ? 
"memcpy" : "memmove"; + const char *IntrMemName = isa(I) ? "memcpy" : "memmove"; return SelectCall(&I, IntrMemName); } case Intrinsic::memset: { - const MemSetInst &MSI = cast(I); + const PlainMemSetInst &MSI = cast(I); // Don't handle volatile. if (MSI.isVolatile()) return false; Index: lib/Target/ARM/ARMISelLowering.cpp =================================================================== --- lib/Target/ARM/ARMISelLowering.cpp +++ lib/Target/ARM/ARMISelLowering.cpp @@ -1412,7 +1412,7 @@ // to align such objects passed to memory intrinsics. bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, unsigned &PrefAlign) const { - if (!isa(CI)) + if (!isa(CI)) return false; MinSize = 8; // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1 Index: lib/Target/Mips/MipsFastISel.cpp =================================================================== --- lib/Target/Mips/MipsFastISel.cpp +++ lib/Target/Mips/MipsFastISel.cpp @@ -1621,17 +1621,17 @@ } case Intrinsic::memcpy: case Intrinsic::memmove: { - const auto *MTI = cast(II); + const auto *MTI = cast(II); // Don't handle volatile. if (MTI->isVolatile()) return false; if (!MTI->getLength()->getType()->isIntegerTy(32)) return false; - const char *IntrMemName = isa(II) ? "memcpy" : "memmove"; + const char *IntrMemName = isa(II) ? "memcpy" : "memmove"; return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2); } case Intrinsic::memset: { - const MemSetInst *MSI = cast(II); + const PlainMemSetInst *MSI = cast(II); // Don't handle volatile. if (MSI->isVolatile()) return false; Index: lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp =================================================================== --- lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp +++ lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp @@ -59,7 +59,7 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) { SmallVector AggrLoads; - SmallVector MemCalls; + SmallVector MemCalls; const DataLayout &DL = F.getParent()->getDataLayout(); LLVMContext &Context = F.getParent()->getContext(); @@ -82,7 +82,7 @@ continue; AggrLoads.push_back(LI); } - } else if (MemIntrinsic *IntrCall = dyn_cast(II)) { + } else if (PlainMemIntrinsic *IntrCall = dyn_cast(II)) { // Convert intrinsic calls with variable size or with constant size // larger than the MaxAggrCopySize threshold. if (ConstantInt *LenCI = dyn_cast(IntrCall->getLength())) { @@ -134,12 +134,12 @@ } // Transform mem* intrinsic calls. - for (MemIntrinsic *MemCall : MemCalls) { - if (MemCpyInst *Memcpy = dyn_cast(MemCall)) { + for (PlainMemIntrinsic *MemCall : MemCalls) { + if (PlainMemCpyInst *Memcpy = dyn_cast(MemCall)) { expandMemCpyAsLoop(Memcpy, TTI); - } else if (MemMoveInst *Memmove = dyn_cast(MemCall)) { + } else if (PlainMemMoveInst *Memmove = dyn_cast(MemCall)) { expandMemMoveAsLoop(Memmove); - } else if (MemSetInst *Memset = dyn_cast(MemCall)) { + } else if (PlainMemSetInst *Memset = dyn_cast(MemCall)) { expandMemSetAsLoop(Memset); } MemCall->eraseFromParent(); Index: lib/Target/X86/X86FastISel.cpp =================================================================== --- lib/Target/X86/X86FastISel.cpp +++ lib/Target/X86/X86FastISel.cpp @@ -2694,7 +2694,7 @@ return true; } case Intrinsic::memcpy: { - const MemCpyInst *MCI = cast(II); + const PlainMemCpyInst *MCI = cast(II); // Don't handle volatile or variable length memcpys. 
if (MCI->isVolatile()) return false; @@ -2723,7 +2723,7 @@ return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 2); } case Intrinsic::memset: { - const MemSetInst *MSI = cast(II); + const PlainMemSetInst *MSI = cast(II); if (MSI->isVolatile()) return false; Index: lib/Transforms/IPO/GlobalOpt.cpp =================================================================== --- lib/Transforms/IPO/GlobalOpt.cpp +++ lib/Transforms/IPO/GlobalOpt.cpp @@ -177,7 +177,7 @@ if (I->hasOneUse()) Dead.push_back(std::make_pair(I, SI)); } - } else if (MemSetInst *MSI = dyn_cast(U)) { + } else if (PlainMemSetInst *MSI = dyn_cast(U)) { if (isa(MSI->getValue())) { Changed = true; MSI->eraseFromParent(); @@ -185,7 +185,7 @@ if (I->hasOneUse()) Dead.push_back(std::make_pair(I, MSI)); } - } else if (MemTransferInst *MTI = dyn_cast(U)) { + } else if (PlainMemTransferInst *MTI = dyn_cast(U)) { GlobalVariable *MemSrc = dyn_cast(MTI->getSource()); if (MemSrc && MemSrc->isConstant()) { Changed = true; @@ -300,7 +300,7 @@ GEP->eraseFromParent(); Changed = true; } - } else if (MemIntrinsic *MI = dyn_cast(U)) { // memset/cpy/mv + } else if (PlainMemIntrinsic *MI = dyn_cast(U)) { // memset/cpy/mv if (MI->getRawDest() == V) { MI->eraseFromParent(); Changed = true; Index: lib/Transforms/InstCombine/InstCombineCalls.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineCalls.cpp +++ lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -95,7 +95,7 @@ } Instruction *InstCombiner::SimplifyElementUnorderedAtomicMemCpy( - ElementUnorderedAtomicMemCpyInst *AMI) { + AtomicMemCpyInst *AMI) { // Try to unfold this intrinsic into sequence of explicit atomic loads and // stores. // First check that number of elements is compile time constant. @@ -171,7 +171,7 @@ return AMI; } -Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) { +Instruction *InstCombiner::SimplifyMemTransfer(PlainMemIntrinsic *MI) { unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, MI, &AC, &DT); unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, MI, &AC, &DT); unsigned MinAlign = std::min(DstAlign, SrcAlign); @@ -182,7 +182,7 @@ return MI; } - // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with + // If PlainMemCpyInst length is 1/2/4/8 bytes then replace memcpy with // load/store. ConstantInt *MemOpLength = dyn_cast(MI->getArgOperand(2)); if (!MemOpLength) return nullptr; @@ -250,7 +250,7 @@ return MI; } -Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) { +Instruction *InstCombiner::SimplifyMemSet(PlainMemSetInst *MI) { unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT); if (MI->getAlignment() < Alignment) { MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), @@ -1811,7 +1811,7 @@ // Intrinsics cannot occur in an invoke, so handle them here instead of in // visitCallSite. - if (MemIntrinsic *MI = dyn_cast(II)) { + if (PlainMemIntrinsic *MI = dyn_cast(II)) { bool Changed = false; // memmove/cpy/set of zero bytes is a noop. @@ -1834,7 +1834,7 @@ // If we have a memmove and the source operation is a constant global, // then the source and dest pointers can't alias, so we can change this // into a call to memcpy. 
- if (MemMoveInst *MMI = dyn_cast(MI)) { + if (PlainMemMoveInst *MMI = dyn_cast(MI)) { if (GlobalVariable *GVSrc = dyn_cast(MMI->getSource())) if (GVSrc->isConstant()) { Module *M = CI.getModule(); @@ -1847,7 +1847,7 @@ } } - if (MemTransferInst *MTI = dyn_cast(MI)) { + if (PlainMemTransferInst *MTI = dyn_cast(MI)) { // memmove(x,x,size) -> noop. if (MTI->getSource() == MTI->getDest()) return eraseInstFromFunction(CI); @@ -1855,10 +1855,10 @@ // If we can determine a pointer alignment that is bigger than currently // set, update the alignment. - if (isa(MI)) { + if (isa(MI)) { if (Instruction *I = SimplifyMemTransfer(MI)) return I; - } else if (MemSetInst *MSI = dyn_cast(MI)) { + } else if (PlainMemSetInst *MSI = dyn_cast(MI)) { if (Instruction *I = SimplifyMemSet(MSI)) return I; } @@ -1866,7 +1866,7 @@ if (Changed) return II; } - if (auto *AMI = dyn_cast(II)) { + if (auto *AMI = dyn_cast(II)) { if (Constant *C = dyn_cast(AMI->getLength())) if (C->isNullValue()) return eraseInstFromFunction(*AMI); Index: lib/Transforms/InstCombine/InstCombineInternal.h =================================================================== --- lib/Transforms/InstCombine/InstCombineInternal.h +++ lib/Transforms/InstCombine/InstCombineInternal.h @@ -39,8 +39,8 @@ class DataLayout; class DominatorTree; class TargetLibraryInfo; -class MemIntrinsic; -class MemSetInst; +class PlainMemIntrinsic; +class PlainMemSetInst; class OptimizationRemarkEmitter; /// Assign a complexity or rank value to LLVM Values. This is used to reduce @@ -752,9 +752,9 @@ bool SimplifyStoreAtEndOfBlock(StoreInst &SI); Instruction * - SimplifyElementUnorderedAtomicMemCpy(ElementUnorderedAtomicMemCpyInst *AMI); - Instruction *SimplifyMemTransfer(MemIntrinsic *MI); - Instruction *SimplifyMemSet(MemSetInst *MI); + SimplifyElementUnorderedAtomicMemCpy(AtomicMemCpyInst *AMI); + Instruction *SimplifyMemTransfer(PlainMemIntrinsic *MI); + Instruction *SimplifyMemSet(PlainMemSetInst *MI); Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned); Index: lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp +++ lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp @@ -55,7 +55,7 @@ /// the alloca, and if the source pointer is a pointer to a constant global, we /// can optimize this. static bool -isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy, +isOnlyCopiedFromConstantGlobal(Value *V, PlainMemTransferInst *&TheCopy, SmallVectorImpl &ToDelete) { // We track lifetime intrinsics as we encounter them. If we decide to go // ahead and replace the value with the global, this lets the caller quickly @@ -125,7 +125,7 @@ // If this is isn't our memcpy/memmove, reject it as something we can't // handle. - MemTransferInst *MI = dyn_cast(I); + PlainMemTransferInst *MI = dyn_cast(I); if (!MI) return false; @@ -160,10 +160,10 @@ /// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only /// modified by a copy from a constant global. If we can prove this, we can /// replace any uses of the alloca with uses of the global directly. 
-static MemTransferInst * +static PlainMemTransferInst * isOnlyCopiedFromConstantGlobal(AllocaInst *AI, SmallVectorImpl &ToDelete) { - MemTransferInst *TheCopy = nullptr; + PlainMemTransferInst *TheCopy = nullptr; if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete)) return TheCopy; return nullptr; @@ -399,7 +399,7 @@ // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A' // is only subsequently read. SmallVector ToDelete; - if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) { + if (PlainMemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) { unsigned SourceAlign = getOrEnforceKnownAlignment( Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT); if (AI.getAlignment() <= SourceAlign && Index: lib/Transforms/InstCombine/InstructionCombining.cpp =================================================================== --- lib/Transforms/InstCombine/InstructionCombining.cpp +++ lib/Transforms/InstCombine/InstructionCombining.cpp @@ -2064,7 +2064,7 @@ case Intrinsic::memmove: case Intrinsic::memcpy: case Intrinsic::memset: { - MemIntrinsic *MI = cast(II); + PlainMemIntrinsic *MI = cast(II); if (MI->isVolatile() || MI->getRawDest() != PI) return false; LLVM_FALLTHROUGH; Index: lib/Transforms/Instrumentation/AddressSanitizer.cpp =================================================================== --- lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -558,7 +558,7 @@ Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr, bool IsWrite, size_t AccessSizeIndex, Value *SizeArgument, uint32_t Exp); - void instrumentMemIntrinsic(MemIntrinsic *MI); + void instrumentMemIntrinsic(PlainMemIntrinsic *MI); Value *memToShadow(Value *Shadow, IRBuilder<> &IRB); bool runOnFunction(Function &F) override; bool maybeInsertAsanInitAtFunctionEntry(Function &F); @@ -1043,15 +1043,15 @@ } // Instrument memset/memmove/memcpy -void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) { +void AddressSanitizer::instrumentMemIntrinsic(PlainMemIntrinsic *MI) { IRBuilder<> IRB(MI); - if (isa(MI)) { + if (isa(MI)) { IRB.CreateCall( - isa(MI) ? AsanMemmove : AsanMemcpy, + isa(MI) ? AsanMemmove : AsanMemcpy, {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()), IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)}); - } else if (isa(MI)) { + } else if (isa(MI)) { IRB.CreateCall( AsanMemset, {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()), @@ -2335,7 +2335,7 @@ isInterestingPointerComparisonOrSubtraction(&Inst)) { PointerComparisonsOrSubtracts.push_back(&Inst); continue; - } else if (isa(Inst)) { + } else if (isa(Inst)) { // ok, take it. 
} else { if (isa(Inst)) NumAllocas++; @@ -2373,7 +2373,7 @@ instrumentMop(ObjSizeVis, Inst, UseCalls, F.getParent()->getDataLayout()); else - instrumentMemIntrinsic(cast(Inst)); + instrumentMemIntrinsic(cast(Inst)); } NumInstrumented++; } Index: lib/Transforms/Instrumentation/DataFlowSanitizer.cpp =================================================================== --- lib/Transforms/Instrumentation/DataFlowSanitizer.cpp +++ lib/Transforms/Instrumentation/DataFlowSanitizer.cpp @@ -354,8 +354,8 @@ void visitInsertValueInst(InsertValueInst &I); void visitAllocaInst(AllocaInst &I); void visitSelectInst(SelectInst &I); - void visitMemSetInst(MemSetInst &I); - void visitMemTransferInst(MemTransferInst &I); + void visitPlainMemSetInst(PlainMemSetInst &I); + void visitPlainMemTransferInst(PlainMemTransferInst &I); }; } @@ -1336,7 +1336,7 @@ } } -void DFSanVisitor::visitMemSetInst(MemSetInst &I) { +void DFSanVisitor::visitPlainMemSetInst(PlainMemSetInst &I) { IRBuilder<> IRB(&I); Value *ValShadow = DFSF.getShadow(I.getValue()); IRB.CreateCall(DFSF.DFS.DFSanSetLabelFn, @@ -1345,7 +1345,7 @@ IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)}); } -void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) { +void DFSanVisitor::visitPlainMemTransferInst(PlainMemTransferInst &I) { IRBuilder<> IRB(&I); Value *DestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I); Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I); Index: lib/Transforms/Instrumentation/EfficiencySanitizer.cpp =================================================================== --- lib/Transforms/Instrumentation/EfficiencySanitizer.cpp +++ lib/Transforms/Instrumentation/EfficiencySanitizer.cpp @@ -185,7 +185,7 @@ void createDestructor(Module &M, Constant *ToolInfoArg); bool runOnFunction(Function &F, Module &M); bool instrumentLoadOrStore(Instruction *I, const DataLayout &DL); - bool instrumentMemIntrinsic(MemIntrinsic *MI); + bool instrumentMemIntrinsic(PlainMemIntrinsic *MI); bool instrumentGetElementPtr(Instruction *I, Module &M); bool insertCounterUpdate(Instruction *I, StructType *StructTy, unsigned CounterIdx); @@ -628,7 +628,7 @@ isa(Inst) || isa(Inst)) && !shouldIgnoreMemoryAccess(&Inst)) LoadsAndStores.push_back(&Inst); - else if (isa(Inst)) + else if (isa(Inst)) MemIntrinCalls.push_back(&Inst); else if (isa(Inst)) GetElementPtrs.push_back(&Inst); @@ -645,7 +645,7 @@ if (ClInstrumentMemIntrinsics) { for (auto Inst : MemIntrinCalls) { - Res |= instrumentMemIntrinsic(cast(Inst)); + Res |= instrumentMemIntrinsic(cast(Inst)); } } @@ -720,10 +720,10 @@ // It's simplest to replace the memset/memmove/memcpy intrinsics with // calls that the runtime library intercepts. // Our pass is late enough that calls should not turn back into intrinsics. -bool EfficiencySanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) { +bool EfficiencySanitizer::instrumentMemIntrinsic(PlainMemIntrinsic *MI) { IRBuilder<> IRB(MI); bool Res = false; - if (isa(MI)) { + if (isa(MI)) { IRB.CreateCall( MemsetFn, {IRB.CreatePointerCast(MI->getArgOperand(0), IRB.getInt8PtrTy()), @@ -731,9 +731,9 @@ IRB.CreateIntCast(MI->getArgOperand(2), IntptrTy, false)}); MI->eraseFromParent(); Res = true; - } else if (isa(MI)) { + } else if (isa(MI)) { IRB.CreateCall( - isa(MI) ? MemcpyFn : MemmoveFn, + isa(MI) ? 
MemcpyFn : MemmoveFn, {IRB.CreatePointerCast(MI->getArgOperand(0), IRB.getInt8PtrTy()), IRB.CreatePointerCast(MI->getArgOperand(1), IRB.getInt8PtrTy()), IRB.CreateIntCast(MI->getArgOperand(2), IntptrTy, false)}); Index: lib/Transforms/Instrumentation/MemorySanitizer.cpp =================================================================== --- lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -1889,7 +1889,7 @@ /// of overlapping regions. So, we simply lower the intrinsic to a call. /// /// Similar situation exists for memcpy and memset. - void visitMemMoveInst(MemMoveInst &I) { + void visitPlainMemMoveInst(PlainMemMoveInst &I) { IRBuilder<> IRB(&I); IRB.CreateCall( MS.MemmoveFn, @@ -1903,7 +1903,7 @@ // This is somewhat unfortunate as it may slowdown small constant memcpys. // FIXME: consider doing manual inline for small constant sizes and proper // alignment. - void visitMemCpyInst(MemCpyInst &I) { + void visitPlainMemCpyInst(PlainMemCpyInst &I) { IRBuilder<> IRB(&I); IRB.CreateCall( MS.MemcpyFn, @@ -1914,7 +1914,7 @@ } // Same as memcpy. - void visitMemSetInst(MemSetInst &I) { + void visitPlainMemSetInst(PlainMemSetInst &I) { IRBuilder<> IRB(&I); IRB.CreateCall( MS.MemsetFn, Index: lib/Transforms/Instrumentation/PGOInstrumentation.cpp =================================================================== --- lib/Transforms/Instrumentation/PGOInstrumentation.cpp +++ lib/Transforms/Instrumentation/PGOInstrumentation.cpp @@ -331,9 +331,9 @@ } // Visit the IR stream and annotate all mem intrinsic call instructions. - void instrumentOneMemIntrinsic(MemIntrinsic &MI); + void instrumentOneMemIntrinsic(PlainMemIntrinsic &MI); // Visit \p MI instruction and perform tasks according to visit mode. - void visitMemIntrinsic(MemIntrinsic &SI); + void visitPlainMemIntrinsic(PlainMemIntrinsic &SI); unsigned getNumOfMemIntrinsics() const { return NMemIs; } }; @@ -1182,7 +1182,7 @@ llvm_unreachable("Unknown visiting mode"); } -void MemIntrinsicVisitor::instrumentOneMemIntrinsic(MemIntrinsic &MI) { +void MemIntrinsicVisitor::instrumentOneMemIntrinsic(PlainMemIntrinsic &MI) { Module *M = F.getParent(); IRBuilder<> Builder(&MI); Type *Int64Ty = Builder.getInt64Ty(); @@ -1197,7 +1197,7 @@ ++CurCtrId; } -void MemIntrinsicVisitor::visitMemIntrinsic(MemIntrinsic &MI) { +void MemIntrinsicVisitor::visitPlainMemIntrinsic(PlainMemIntrinsic &MI) { if (!PGOInstrMemOP) return; Value *Length = MI.getLength(); Index: lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp =================================================================== --- lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp +++ lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp @@ -157,7 +157,7 @@ } } - void visitMemIntrinsic(MemIntrinsic &MI) { + void visitPlainMemIntrinsic(PlainMemIntrinsic &MI) { Value *Length = MI.getLength(); // Not perform on constant length calls. if (dyn_cast(Length)) @@ -170,14 +170,14 @@ BlockFrequencyInfo &BFI; OptimizationRemarkEmitter &ORE; bool Changed; - std::vector WorkList; + std::vector WorkList; // Start of the previse range. int64_t PreciseRangeStart; // Last value of the previse range. int64_t PreciseRangeLast; // The space to read the profile annotation. std::unique_ptr ValueDataArray; - bool perform(MemIntrinsic *MI); + bool perform(PlainMemIntrinsic *MI); // This kind shows which group the value falls in. For PreciseValue, we have // the profile count for that value. 
LargeGroup groups the values that are in @@ -193,7 +193,7 @@ } }; -static const char *getMIName(const MemIntrinsic *MI) { +static const char *getMIName(const PlainMemIntrinsic *MI) { switch (MI->getIntrinsicID()) { case Intrinsic::memcpy: return "memcpy"; @@ -224,7 +224,7 @@ return ScaleCount / Denom; } -bool MemOPSizeOpt::perform(MemIntrinsic *MI) { +bool MemOPSizeOpt::perform(PlainMemIntrinsic *MI) { assert(MI); if (MI->getIntrinsicID() == Intrinsic::memmove) return false; @@ -365,7 +365,7 @@ Ctx, Twine("MemOP.Case.") + Twine(SizeId), &Func, DefaultBB); Instruction *NewInst = MI->clone(); // Fix the argument. - MemIntrinsic * MemI = dyn_cast(NewInst); + PlainMemIntrinsic * MemI = dyn_cast(NewInst); IntegerType *SizeType = dyn_cast(MemI->getLength()->getType()); assert(SizeType && "Expected integer type size argument."); ConstantInt *CaseSizeId = ConstantInt::get(SizeType, SizeId); Index: lib/Transforms/Instrumentation/ThreadSanitizer.cpp =================================================================== --- lib/Transforms/Instrumentation/ThreadSanitizer.cpp +++ lib/Transforms/Instrumentation/ThreadSanitizer.cpp @@ -429,7 +429,7 @@ else if (isa(Inst) || isa(Inst)) { if (CallInst *CI = dyn_cast(&Inst)) maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI); - if (isa(Inst)) + if (isa(Inst)) MemIntrinCalls.push_back(&Inst); HasCalls = true; chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores, @@ -568,16 +568,16 @@ // we will need to call e.g. __tsan_memset to avoid the intrinsics. bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) { IRBuilder<> IRB(I); - if (MemSetInst *M = dyn_cast(I)) { + if (PlainMemSetInst *M = dyn_cast(I)) { IRB.CreateCall( MemsetFn, {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()), IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false), IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)}); I->eraseFromParent(); - } else if (MemTransferInst *M = dyn_cast(I)) { + } else if (PlainMemTransferInst *M = dyn_cast(I)) { IRB.CreateCall( - isa(M) ? MemcpyFn : MemmoveFn, + isa(M) ? MemcpyFn : MemmoveFn, {IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()), IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()), IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false)}); Index: lib/Transforms/Scalar/AlignmentFromAssumptions.cpp =================================================================== --- lib/Transforms/Scalar/AlignmentFromAssumptions.cpp +++ lib/Transforms/Scalar/AlignmentFromAssumptions.cpp @@ -335,7 +335,7 @@ SI->setAlignment(NewAlignment); ++NumStoreAlignChanged; } - } else if (MemIntrinsic *MI = dyn_cast(J)) { + } else if (PlainMemIntrinsic *MI = dyn_cast(J)) { unsigned NewDestAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MI->getDest(), SE); @@ -344,16 +344,16 @@ // instruction, but only for one operand, save it. If we reach the // other operand through another assumption later, then we may // change the alignment at that point. - if (MemTransferInst *MTI = dyn_cast(MI)) { + if (PlainMemTransferInst *MTI = dyn_cast(MI)) { unsigned NewSrcAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MTI->getSource(), SE); - DenseMap::iterator DI = + DenseMap::iterator DI = NewDestAlignments.find(MTI); unsigned AltDestAlignment = (DI == NewDestAlignments.end()) ? 0 : DI->second; - DenseMap::iterator SI = + DenseMap::iterator SI = NewSrcAlignments.find(MTI); unsigned AltSrcAlignment = (SI == NewSrcAlignments.end()) ? 
0 : SI->second; @@ -382,7 +382,7 @@ NewDestAlignments.insert(std::make_pair(MTI, NewDestAlignment)); NewSrcAlignments.insert(std::make_pair(MTI, NewSrcAlignment)); } else if (NewDestAlignment > MI->getAlignment()) { - assert((!isa(MI) || isa(MI)) && + assert((!isa(MI) || isa(MI)) && "Unknown memory intrinsic"); MI->setAlignment(ConstantInt::get(Type::getInt32Ty( Index: lib/Transforms/Scalar/DeadStoreElimination.cpp =================================================================== --- lib/Transforms/Scalar/DeadStoreElimination.cpp +++ lib/Transforms/Scalar/DeadStoreElimination.cpp @@ -162,7 +162,7 @@ if (StoreInst *SI = dyn_cast(Inst)) return MemoryLocation::get(SI); - if (MemIntrinsic *MI = dyn_cast(Inst)) { + if (PlainMemIntrinsic *MI = dyn_cast(Inst)) { // memcpy/memmove/memset. MemoryLocation Loc = MemoryLocation::getForDest(MI); return Loc; @@ -194,7 +194,7 @@ // The only instructions that both read and write are the mem transfer // instructions (memcpy/memmove). - if (MemTransferInst *MTI = dyn_cast(Inst)) + if (PlainMemTransferInst *MTI = dyn_cast(Inst)) return MemoryLocation::getForSource(MTI); return MemoryLocation(); } @@ -221,7 +221,7 @@ case Intrinsic::memmove: case Intrinsic::memcpy: // Don't remove volatile memory intrinsics. - return !cast(II)->isVolatile(); + return !cast(II)->isVolatile(); } } @@ -268,7 +268,7 @@ static Value *getStoredPointerOperand(Instruction *I) { if (StoreInst *SI = dyn_cast(I)) return SI->getPointerOperand(); - if (MemIntrinsic *MI = dyn_cast(I)) + if (PlainMemIntrinsic *MI = dyn_cast(I)) return MI->getDest(); if (IntrinsicInst *II = dyn_cast(I)) { @@ -830,7 +830,7 @@ LoadedLoc = MemoryLocation::get(L); } else if (VAArgInst *V = dyn_cast(BBI)) { LoadedLoc = MemoryLocation::get(V); - } else if (MemTransferInst *MTI = dyn_cast(BBI)) { + } else if (PlainMemTransferInst *MTI = dyn_cast(BBI)) { LoadedLoc = MemoryLocation::getForSource(MTI); } else if (!BBI->mayReadFromMemory()) { // Instruction doesn't read memory. Note that stores that weren't removed @@ -863,7 +863,7 @@ // Power of 2 vector writes are probably always a bad idea to optimize // as any store/memset/memcpy is likely using vector instructions so // shortening it to not vector size is likely to be slower - MemIntrinsic *EarlierIntrinsic = cast(EarlierWrite); + PlainMemIntrinsic *EarlierIntrinsic = cast(EarlierWrite); unsigned EarlierWriteAlign = EarlierIntrinsic->getAlignment(); if (!IsOverwriteEnd) LaterOffset = int64_t(LaterOffset + LaterSize); Index: lib/Transforms/Scalar/GVN.cpp =================================================================== --- lib/Transforms/Scalar/GVN.cpp +++ lib/Transforms/Scalar/GVN.cpp @@ -174,7 +174,7 @@ return Res; } - static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) { + static AvailableValue getMI(PlainMemIntrinsic *MI, unsigned Offset = 0) { AvailableValue Res; Res.Val.setPointer(MI); Res.Val.setInt(MemIntrin); @@ -213,9 +213,9 @@ return cast(Val.getPointer()); } - MemIntrinsic *getMemIntrinValue() const { + PlainMemIntrinsic *getMemIntrinValue() const { assert(isMemIntrinValue() && "Wrong accessor"); - return cast(Val.getPointer()); + return cast(Val.getPointer()); } /// Emit code at the specified insertion point to adjust the value defined @@ -902,7 +902,7 @@ // If the clobbering value is a memset/memcpy/memmove, see if we can // forward a value on from it. 
- if (MemIntrinsic *DepMI = dyn_cast(DepInfo.getInst())) { + if (PlainMemIntrinsic *DepMI = dyn_cast(DepInfo.getInst())) { if (Address && !LI->isAtomic()) { int Offset = analyzeLoadFromClobberingMemInst(LI->getType(), Address, DepMI, DL); Index: lib/Transforms/Scalar/InferAddressSpaces.cpp =================================================================== --- lib/Transforms/Scalar/InferAddressSpaces.cpp +++ lib/Transforms/Scalar/InferAddressSpaces.cpp @@ -333,12 +333,12 @@ PushPtrOperand(RMW->getPointerOperand()); else if (auto *CmpX = dyn_cast(&I)) PushPtrOperand(CmpX->getPointerOperand()); - else if (auto *MI = dyn_cast(&I)) { + else if (auto *MI = dyn_cast(&I)) { // For memset/memcpy/memmove, any pointer operand can be replaced. PushPtrOperand(MI->getRawDest()); // Handle 2nd operand for memcpy/memmove. - if (auto *MTI = dyn_cast(MI)) + if (auto *MTI = dyn_cast(MI)) PushPtrOperand(MTI->getRawSource()); } else if (auto *II = dyn_cast(&I)) collectRewritableIntrinsicOperands(II, PostorderStack, Visited); @@ -735,19 +735,19 @@ /// Update memory intrinsic uses that require more complex processing than /// simple memory instructions. Thse require re-mangling and may have multiple /// pointer operands. -static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV, +static bool handleMemIntrinsicPtrUse(PlainMemIntrinsic *MI, Value *OldV, Value *NewV) { IRBuilder<> B(MI); MDNode *TBAA = MI->getMetadata(LLVMContext::MD_tbaa); MDNode *ScopeMD = MI->getMetadata(LLVMContext::MD_alias_scope); MDNode *NoAliasMD = MI->getMetadata(LLVMContext::MD_noalias); - if (auto *MSI = dyn_cast(MI)) { + if (auto *MSI = dyn_cast(MI)) { B.CreateMemSet(NewV, MSI->getValue(), MSI->getLength(), MSI->getAlignment(), false, // isVolatile TBAA, ScopeMD, NoAliasMD); - } else if (auto *MTI = dyn_cast(MI)) { + } else if (auto *MTI = dyn_cast(MI)) { Value *Src = MTI->getRawSource(); Value *Dest = MTI->getRawDest(); @@ -758,14 +758,14 @@ if (Dest == OldV) Dest = NewV; - if (isa(MTI)) { + if (isa(MTI)) { MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct); B.CreateMemCpy(Dest, Src, MTI->getLength(), MTI->getAlignment(), false, // isVolatile TBAA, TBAAStruct, ScopeMD, NoAliasMD); } else { - assert(isa(MTI)); + assert(isa(MTI)); B.CreateMemMove(Dest, Src, MTI->getLength(), MTI->getAlignment(), false, // isVolatile @@ -890,7 +890,7 @@ User *CurUser = U.getUser(); // Handle more complex cases like intrinsic that need to be remangled. - if (auto *MI = dyn_cast(CurUser)) { + if (auto *MI = dyn_cast(CurUser)) { if (!MI->isVolatile() && handleMemIntrinsicPtrUse(MI, V, NewV)) continue; } Index: lib/Transforms/Scalar/LoopIdiomRecognize.cpp =================================================================== --- lib/Transforms/Scalar/LoopIdiomRecognize.cpp +++ lib/Transforms/Scalar/LoopIdiomRecognize.cpp @@ -132,7 +132,7 @@ LegalStoreKind isLegalStore(StoreInst *SI); bool processLoopStores(SmallVectorImpl &SL, const SCEV *BECount, bool ForMemset); - bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount); + bool processLoopMemSet(PlainMemSetInst *MSI, const SCEV *BECount); bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment, Value *StoredVal, @@ -527,7 +527,7 @@ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { Instruction *Inst = &*I++; // Look for memset instructions, which may be optimized to a larger memset. 
- if (MemSetInst *MSI = dyn_cast(Inst)) { + if (PlainMemSetInst *MSI = dyn_cast(Inst)) { WeakTrackingVH InstPtr(&*I); if (!processLoopMemSet(MSI, BECount)) continue; @@ -685,7 +685,7 @@ } /// processLoopMemSet - See if this memset can be promoted to a large memset. -bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI, +bool LoopIdiomRecognize::processLoopMemSet(PlainMemSetInst *MSI, const SCEV *BECount) { // We can only handle non-volatile memsets with a constant size. if (MSI->isVolatile() || !isa(MSI->getLength())) Index: lib/Transforms/Scalar/LoopRerollPass.cpp =================================================================== --- lib/Transforms/Scalar/LoopRerollPass.cpp +++ lib/Transforms/Scalar/LoopRerollPass.cpp @@ -745,7 +745,7 @@ return LI->isUnordered(); if (StoreInst *SI = dyn_cast(I)) return SI->isUnordered(); - if (MemIntrinsic *MI = dyn_cast(I)) + if (PlainMemIntrinsic *MI = dyn_cast(I)) return !MI->isVolatile(); return false; } Index: lib/Transforms/Scalar/MemCpyOptimizer.cpp =================================================================== --- lib/Transforms/Scalar/MemCpyOptimizer.cpp +++ lib/Transforms/Scalar/MemCpyOptimizer.cpp @@ -242,7 +242,7 @@ if (StoreInst *SI = dyn_cast(Inst)) addStore(OffsetFromFirst, SI); else - addMemSet(OffsetFromFirst, cast(Inst)); + addMemSet(OffsetFromFirst, cast(Inst)); } void addStore(int64_t OffsetFromFirst, StoreInst *SI) { @@ -252,7 +252,7 @@ SI->getPointerOperand(), SI->getAlignment(), SI); } - void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) { + void addMemSet(int64_t OffsetFromFirst, PlainMemSetInst *MSI) { int64_t Size = cast(MSI->getLength())->getZExtValue(); addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI); } @@ -391,7 +391,7 @@ BasicBlock::iterator BI(StartInst); for (++BI; !isa(BI); ++BI) { - if (!isa(BI) && !isa(BI)) { + if (!isa(BI) && !isa(BI)) { // If the instruction is readnone, ignore it, otherwise bail out. We // don't even allow readonly here because we don't want something like: // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A). @@ -416,7 +416,7 @@ Ranges.addStore(Offset, NextStore); } else { - MemSetInst *MSI = cast(BI); + PlainMemSetInst *MSI = cast(BI); if (MSI->isVolatile() || ByteVal != MSI->getValue() || !isa(MSI->getLength())) @@ -683,7 +683,7 @@ // a memcpy. MemDepResult ldep = MD->getDependency(LI); CallInst *C = nullptr; - if (ldep.isClobber() && !isa(ldep.getInst())) + if (ldep.isClobber() && !isa(ldep.getInst())) C = dyn_cast(ldep.getInst()); if (C) { @@ -768,7 +768,7 @@ return false; } -bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) { +bool MemCpyOptPass::processMemSet(PlainMemSetInst *MSI, BasicBlock::iterator &BBI) { // See if there is another memset or store neighboring this memset which // allows us to widen out the memset to do a single larger store. if (isa(MSI->getLength()) && !MSI->isVolatile()) @@ -988,8 +988,8 @@ /// We've found that the (upward scanning) memory dependence of memcpy 'M' is /// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can. -bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M, - MemCpyInst *MDep) { +bool MemCpyOptPass::processMemCpyMemCpyDependence(PlainMemCpyInst *M, + PlainMemCpyInst *MDep) { // We can only transforms memcpy's where the dest of one is the source of the // other. if (M->getSource() != MDep->getDest() || MDep->isVolatile()) @@ -1076,8 +1076,8 @@ /// memcpy(dst, src, src_size); /// memset(dst + src_size, c, dst_size <= src_size ? 
 /// \endcode
-bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
-                                                  MemSetInst *MemSet) {
+bool MemCpyOptPass::processMemSetMemCpyDependence(PlainMemCpyInst *MemCpy,
+                                                  PlainMemSetInst *MemSet) {
   // We can only transform memset/memcpy with the same destination.
   if (MemSet->getDest() != MemCpy->getDest())
     return false;
@@ -1141,8 +1141,8 @@
 /// When dst2_size <= dst1_size.
 ///
 /// The \p MemCpy must have a Constant length.
-bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
-                                               MemSetInst *MemSet) {
+bool MemCpyOptPass::performMemCpyToMemSetOptzn(PlainMemCpyInst *MemCpy,
+                                               PlainMemSetInst *MemSet) {
   AliasAnalysis &AA = LookupAliasAnalysis();

   // Make sure that memcpy(..., memset(...), ...), that is we are memsetting and
@@ -1168,7 +1168,7 @@
 /// B to be a memcpy from X to Z (or potentially a memmove, depending on
 /// circumstances). This allows later passes to remove the first memcpy
 /// altogether.
-bool MemCpyOptPass::processMemCpy(MemCpyInst *M) {
+bool MemCpyOptPass::processMemCpy(PlainMemCpyInst *M) {
   // We can only optimize non-volatile memcpy's.
   if (M->isVolatile()) return false;
@@ -1197,7 +1197,7 @@
   // Try to turn a partially redundant memset + memcpy into
   // memcpy + smaller memset. We don't need the memcpy size for this.
   if (DepInfo.isClobber())
-    if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
+    if (PlainMemSetInst *MDep = dyn_cast<PlainMemSetInst>(DepInfo.getInst()))
       if (processMemSetMemCpyDependence(M, MDep))
         return true;
@@ -1229,7 +1229,7 @@
       SrcLoc, true, M->getIterator(), M->getParent());

   if (SrcDepInfo.isClobber()) {
-    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
+    if (PlainMemCpyInst *MDep = dyn_cast<PlainMemCpyInst>(SrcDepInfo.getInst()))
       return processMemCpyMemCpyDependence(M, MDep);
   } else if (SrcDepInfo.isDef()) {
     Instruction *I = SrcDepInfo.getInst();
@@ -1253,7 +1253,7 @@
   }

   if (SrcDepInfo.isClobber())
-    if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
+    if (PlainMemSetInst *MDep = dyn_cast<PlainMemSetInst>(SrcDepInfo.getInst()))
       if (performMemCpyToMemSetOptzn(M, MDep)) {
         MD->removeInstruction(M);
         M->eraseFromParent();
@@ -1266,7 +1266,7 @@
 /// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
 /// not to alias.
-bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
+bool MemCpyOptPass::processMemMove(PlainMemMoveInst *M) {
   AliasAnalysis &AA = LookupAliasAnalysis();

   if (!TLI->has(LibFunc_memmove))
@@ -1311,7 +1311,7 @@
   // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
   // a memcpy, see if we can byval from the source of the memcpy instead of the
   // result.
-  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
+  PlainMemCpyInst *MDep = dyn_cast<PlainMemCpyInst>(DepInfo.getInst());
   if (!MDep || MDep->isVolatile() ||
       ByValArg->stripPointerCasts() != MDep->getDest())
     return false;
@@ -1384,11 +1384,11 @@
       if (StoreInst *SI = dyn_cast<StoreInst>(I))
         MadeChange |= processStore(SI, BI);
-      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
+      else if (PlainMemSetInst *M = dyn_cast<PlainMemSetInst>(I))
         RepeatInstruction = processMemSet(M, BI);
-      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
+      else if (PlainMemCpyInst *M = dyn_cast<PlainMemCpyInst>(I))
         RepeatInstruction = processMemCpy(M);
-      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
+      else if (PlainMemMoveInst *M = dyn_cast<PlainMemMoveInst>(I))
         RepeatInstruction = processMemMove(M);
       else if (auto CS = CallSite(I)) {
         for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
Index: lib/Transforms/Scalar/NewGVN.cpp
===================================================================
--- lib/Transforms/Scalar/NewGVN.cpp
+++ lib/Transforms/Scalar/NewGVN.cpp
@@ -1327,7 +1327,7 @@
       }
     }

-  } else if (auto *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
+  } else if (auto *DepMI = dyn_cast<PlainMemIntrinsic>(DepInst)) {
     int Offset = analyzeLoadFromClobberingMemInst(LoadType, LoadPtr, DepMI, DL);
     if (Offset >= 0) {
       if (auto *PossibleConstant =
Index: lib/Transforms/Scalar/SROA.cpp
===================================================================
--- lib/Transforms/Scalar/SROA.cpp
+++ lib/Transforms/Scalar/SROA.cpp
@@ -816,7 +816,7 @@
     handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
   }

-  void visitMemSetInst(MemSetInst &II) {
+  void visitPlainMemSetInst(PlainMemSetInst &II) {
     assert(II.getRawDest() == *U && "Pointer use is not the destination?");
     ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
     if ((Length && Length->getValue() == 0) ||
@@ -832,7 +832,7 @@
               (bool)Length);
   }

-  void visitMemTransferInst(MemTransferInst &II) {
+  void visitPlainMemTransferInst(PlainMemTransferInst &II) {
     ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
     if (Length && Length->getValue() == 0)
       // Zero-length mem transfer intrinsics can be ignored entirely.
@@ -1785,7 +1785,7 @@
   Use *U = S.getUse();

-  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
+  if (PlainMemIntrinsic *MI = dyn_cast<PlainMemIntrinsic>(U->getUser())) {
     if (MI->isVolatile())
       return false;
     if (!S.isSplittable())
@@ -1999,7 +1999,7 @@
       // they are promotable.
       return false;
     }
-  } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
+  } else if (PlainMemIntrinsic *MI = dyn_cast<PlainMemIntrinsic>(U->getUser())) {
     if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
       return false;
     if (!S.isSplittable())
@@ -2668,7 +2668,7 @@
     return V;
   }

-  bool visitMemSetInst(MemSetInst &II) {
+  bool visitPlainMemSetInst(PlainMemSetInst &II) {
     DEBUG(dbgs() << "    original: " << II << "\n");
     assert(II.getRawDest() == OldPtr);
@@ -2774,7 +2774,7 @@
     return !II.isVolatile();
   }

-  bool visitMemTransferInst(MemTransferInst &II) {
+  bool visitPlainMemTransferInst(PlainMemTransferInst &II) {
     // Rewriting of memory transfer instructions can be a bit tricky. We break
     // them into two categories: split intrinsics and unsplit intrinsics.
Index: lib/Transforms/Utils/AddDiscriminators.cpp
===================================================================
--- lib/Transforms/Utils/AddDiscriminators.cpp
+++ lib/Transforms/Utils/AddDiscriminators.cpp
@@ -103,7 +103,7 @@
 }

 static bool shouldHaveDiscriminator(const Instruction *I) {
-  return !isa<IntrinsicInst>(I) || isa<MemIntrinsic>(I);
+  return !isa<IntrinsicInst>(I) || isa<PlainMemIntrinsic>(I);
 }

 /// \brief Assign DWARF discriminators.
Index: lib/Transforms/Utils/Evaluator.cpp
===================================================================
--- lib/Transforms/Utils/Evaluator.cpp
+++ lib/Transforms/Utils/Evaluator.cpp
@@ -366,7 +366,7 @@
       }

       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
-        if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) {
+        if (PlainMemSetInst *MSI = dyn_cast<PlainMemSetInst>(II)) {
           if (MSI->isVolatile()) {
             DEBUG(dbgs() << "Can not optimize a volatile memset "
                          << "intrinsic.\n");
Index: lib/Transforms/Utils/GlobalStatus.cpp
===================================================================
--- lib/Transforms/Utils/GlobalStatus.cpp
+++ lib/Transforms/Utils/GlobalStatus.cpp
@@ -154,14 +154,14 @@
         return true;
       } else if (isa<CmpInst>(I)) {
         GS.IsCompared = true;
-      } else if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(I)) {
+      } else if (const PlainMemTransferInst *MTI = dyn_cast<PlainMemTransferInst>(I)) {
        if (MTI->isVolatile())
          return true;
        if (MTI->getArgOperand(0) == V)
          GS.StoredType = GlobalStatus::Stored;
        if (MTI->getArgOperand(1) == V)
          GS.IsLoaded = true;
-      } else if (const MemSetInst *MSI = dyn_cast<MemSetInst>(I)) {
+      } else if (const PlainMemSetInst *MSI = dyn_cast<PlainMemSetInst>(I)) {
        assert(MSI->getArgOperand(0) == V && "Memset only takes one pointer!");
        if (MSI->isVolatile())
          return true;
Index: lib/Transforms/Utils/LowerMemIntrinsics.cpp
===================================================================
--- lib/Transforms/Utils/LowerMemIntrinsics.cpp
+++ lib/Transforms/Utils/LowerMemIntrinsics.cpp
@@ -452,7 +452,7 @@
                       NewBB);
 }

-void llvm::expandMemCpyAsLoop(MemCpyInst *Memcpy,
+void llvm::expandMemCpyAsLoop(PlainMemCpyInst *Memcpy,
                               const TargetTransformInfo &TTI) {
   // Original implementation
   if (!TTI.useWideIRMemcpyLoopLowering()) {
@@ -489,7 +489,7 @@
   }
 }

-void llvm::expandMemMoveAsLoop(MemMoveInst *Memmove) {
+void llvm::expandMemMoveAsLoop(PlainMemMoveInst *Memmove) {
   createMemMoveLoop(/* InsertBefore */ Memmove,
                     /* SrcAddr */ Memmove->getRawSource(),
                     /* DstAddr */ Memmove->getRawDest(),
@@ -500,7 +500,7 @@
                     /* DstIsVolatile */ Memmove->isVolatile());
 }

-void llvm::expandMemSetAsLoop(MemSetInst *Memset) {
+void llvm::expandMemSetAsLoop(PlainMemSetInst *Memset) {
   createMemSetLoop(/* InsertBefore */ Memset,
                    /* DstAddr */ Memset->getRawDest(),
                    /* CopyLen */ Memset->getLength(),
Index: lib/Transforms/Utils/VNCoercion.cpp
===================================================================
--- lib/Transforms/Utils/VNCoercion.cpp
+++ lib/Transforms/Utils/VNCoercion.cpp
@@ -249,7 +249,7 @@
 }

 int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
-                                     MemIntrinsic *MI, const DataLayout &DL) {
+                                     PlainMemIntrinsic *MI, const DataLayout &DL) {
   // If the mem operation is a non-constant size, we can't handle it.
   ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
   if (!SizeCst)
@@ -265,7 +265,7 @@
   // If we have a memcpy/memmove, the only case we can handle is if this is a
   // copy from constant memory. In that case, we can read directly from the
   // constant memory.
-  MemTransferInst *MTI = cast<MemTransferInst>(MI);
+  PlainMemTransferInst *MTI = cast<PlainMemTransferInst>(MI);

   Constant *Src = dyn_cast<Constant>(MTI->getSource());
   if (!Src)
@@ -416,7 +416,7 @@
 }

 template <class T, class HelperClass>
-T *getMemInstValueForLoadHelper(MemIntrinsic *SrcInst, unsigned Offset,
+T *getMemInstValueForLoadHelper(PlainMemIntrinsic *SrcInst, unsigned Offset,
                                 Type *LoadTy, HelperClass &Helper,
                                 const DataLayout &DL) {
   LLVMContext &Ctx = LoadTy->getContext();

   // We know that this method is only called when the mem transfer fully
   // provides the bits for the load.
-  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
+  if (PlainMemSetInst *MSI = dyn_cast<PlainMemSetInst>(SrcInst)) {
     // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
     // independently of what the offset is.
     T *Val = cast<T>(MSI->getValue());
@@ -454,7 +454,7 @@
   }

   // Otherwise, this is a memcpy/memmove from a constant global.
-  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
+  PlainMemTransferInst *MTI = cast<PlainMemTransferInst>(SrcInst);
   Constant *Src = cast<Constant>(MTI->getSource());
   unsigned AS = Src->getType()->getPointerAddressSpace();
@@ -472,7 +472,7 @@
 /// This function is called when we have a
 /// memdep query of a load that ends up being a clobbering mem intrinsic.
-Value *getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
+Value *getMemInstValueForLoad(PlainMemIntrinsic *SrcInst, unsigned Offset,
                               Type *LoadTy, Instruction *InsertPt,
                               const DataLayout &DL) {
   IRBuilder<> Builder(InsertPt);
@@ -480,11 +480,11 @@
                                          LoadTy, Builder, DL);
 }

-Constant *getConstantMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
+Constant *getConstantMemInstValueForLoad(PlainMemIntrinsic *SrcInst, unsigned Offset,
                                          Type *LoadTy, const DataLayout &DL) {
   // The only case analyzeLoadFromClobberingMemInst cannot be converted to a
   // constant is when it's a memset of a non-constant.
-  if (auto *MSI = dyn_cast<MemSetInst>(SrcInst))
+  if (auto *MSI = dyn_cast<PlainMemSetInst>(SrcInst))
     if (!isa<Constant>(MSI->getValue()))
       return nullptr;
   ConstantFolder F;
Index: lib/Transforms/Vectorize/SLPVectorizer.cpp
===================================================================
--- lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -497,7 +497,7 @@
     return LI->isSimple();
   if (StoreInst *SI = dyn_cast<StoreInst>(I))
     return SI->isSimple();
-  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
+  if (PlainMemIntrinsic *MI = dyn_cast<PlainMemIntrinsic>(I))
     return !MI->isVolatile();
   return true;
 }
Index: test/Instrumentation/DataFlowSanitizer/unordered_atomic_mem_intrins.ll
===================================================================
--- test/Instrumentation/DataFlowSanitizer/unordered_atomic_mem_intrins.ll
+++ test/Instrumentation/DataFlowSanitizer/unordered_atomic_mem_intrins.ll
@@ -4,7 +4,7 @@
 target triple = "x86_64-unknown-linux-gnu"

 ;; Placeholder tests that will fail once element atomic @llvm.mem[move|set] instrinsics have
-;; been added to the MemIntrinsic class hierarchy. These will act as a reminder to
+;; been added to the PlainMemIntrinsic class hierarchy. These will act as a reminder to
 ;; verify that dfsan handles these intrinsics properly once they have been
 ;; added to that class hierarchy.
Index: test/Instrumentation/MemorySanitizer/msan_basic.ll
===================================================================
--- test/Instrumentation/MemorySanitizer/msan_basic.ll
+++ test/Instrumentation/MemorySanitizer/msan_basic.ll
@@ -240,7 +240,7 @@
 ;; ------------
 ;; Placeholder tests that will fail once element atomic @llvm.mem[cpy|move|set] instrinsics have
-;; been added to the MemIntrinsic class hierarchy. These will act as a reminder to
+;; been added to the PlainMemIntrinsic class hierarchy. These will act as a reminder to
 ;; verify that MSAN handles these intrinsics properly once they have been
 ;; added to that class hierarchy.
 declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32) nounwind
Index: test/Transforms/InstCombine/element-atomic-memintrins.ll
===================================================================
--- test/Transforms/InstCombine/element-atomic-memintrins.ll
+++ test/Transforms/InstCombine/element-atomic-memintrins.ll
@@ -1,5 +1,5 @@
 ;; Placeholder tests that will fail once element atomic @llvm.mem[move|set] instrinsics have
-;; been added to the MemIntrinsic class hierarchy. These will act as a reminder to
+;; been added to the PlainMemIntrinsic class hierarchy. These will act as a reminder to
 ;; verify that inst combine handles these intrinsics properly once they have been
 ;; added to that class hierarchy.