diff --git a/llvm/include/llvm/Analysis/MemoryLocation.h b/llvm/include/llvm/Analysis/MemoryLocation.h
--- a/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -28,12 +28,15 @@
 class StoreInst;
 class MemTransferInst;
 class MemIntrinsic;
+class MemWriteIntrinsic;
 class AtomicCmpXchgInst;
 class AtomicMemTransferInst;
 class AtomicMemIntrinsic;
+class AtomicMemWriteIntrinsic;
 class AtomicRMWInst;
 class AnyMemTransferInst;
 class AnyMemIntrinsic;
+class AnyMemWriteIntrinsic;
 class TargetLibraryInfo;
 class VAArgInst;
@@ -250,9 +253,9 @@
   /// Return a location representing the destination of a memory set or
   /// transfer.
-  static MemoryLocation getForDest(const MemIntrinsic *MI);
-  static MemoryLocation getForDest(const AtomicMemIntrinsic *MI);
-  static MemoryLocation getForDest(const AnyMemIntrinsic *MI);
+  static MemoryLocation getForDest(const MemWriteIntrinsic *MI);
+  static MemoryLocation getForDest(const AtomicMemWriteIntrinsic *MI);
+  static MemoryLocation getForDest(const AnyMemWriteIntrinsic *MI);

   /// Return a location representing a particular argument of a call.
   static MemoryLocation getForArgument(const CallBase *Call, unsigned ArgIdx,
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -631,149 +631,192 @@
 /// three atomicity hierarchies.
 template <typename Derived> class MemIntrinsicBase : public IntrinsicInst {
 private:
-  enum { ARG_DEST = 0, ARG_LENGTH = 2 };
+  enum { ARG_LENGTH = 2 };

-public:
-  Value *getRawDest() const {
-    return const_cast<Value *>(getArgOperand(ARG_DEST));
+protected:
+  Value *getRawArg(unsigned ArgIndex) const {
+    return const_cast<Value *>(getArgOperand(ArgIndex));
   }
-  const Use &getRawDestUse() const { return getArgOperandUse(ARG_DEST); }
-  Use &getRawDestUse() { return getArgOperandUse(ARG_DEST); }

-  Value *getLength() const {
-    return const_cast<Value *>(getArgOperand(ARG_LENGTH));
+  const Use &getRawArgUse(unsigned ArgIndex) const {
+    return getArgOperandUse(ArgIndex);
   }
-  const Use &getLengthUse() const { return getArgOperandUse(ARG_LENGTH); }
-  Use &getLengthUse() { return getArgOperandUse(ARG_LENGTH); }

-  /// This is just like getRawDest, but it strips off any cast
-  /// instructions (including addrspacecast) that feed it, giving the
-  /// original input.  The returned value is guaranteed to be a pointer.
-  Value *getDest() const { return getRawDest()->stripPointerCasts(); }
+  Use &getRawArgUse(unsigned ArgIndex) {
+    return getArgOperandUse(ArgIndex);
+  }

-  unsigned getDestAddressSpace() const {
-    return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
+  Value *getArg(unsigned ArgIndex) const {
+    return getRawArg(ArgIndex)->stripPointerCasts();
+  }
+
+  unsigned getArgAddressSpace(unsigned ArgIndex) const {
+    return cast<PointerType>(getRawArg(ArgIndex)->getType())->getAddressSpace();
   }

   /// FIXME: Remove this function once transition to Align is over.
-  /// Use getDestAlign() instead.
-  unsigned getDestAlignment() const {
-    if (auto MA = getParamAlign(ARG_DEST))
+  /// Use getArgAlign() instead.
+  unsigned getArgAlignment(unsigned ArgIndex) const {
+    if (auto MA = getParamAlign(ArgIndex))
       return MA->value();
     return 0;
   }
-  MaybeAlign getDestAlign() const { return getParamAlign(ARG_DEST); }
-
-  /// Set the specified arguments of the instruction.
-  void setDest(Value *Ptr) {
-    assert(getRawDest()->getType() == Ptr->getType() &&
-           "setDest called with pointer of wrong type!");
-    setArgOperand(ARG_DEST, Ptr);
+  MaybeAlign getArgAlign(unsigned ArgIndex) const {
+    return getParamAlign(ArgIndex);
+  }
+
+  void setArg(unsigned ArgIndex, Value *Ptr) {
+    assert(getRawArg(ArgIndex)->getType() == Ptr->getType() &&
+           "setArg called with value of wrong type!");
+    setArgOperand(ArgIndex, Ptr);
   }

   /// FIXME: Remove this function once transition to Align is over.
   /// Use the version that takes MaybeAlign instead of this one.
-  void setDestAlignment(unsigned Alignment) {
-    setDestAlignment(MaybeAlign(Alignment));
+  void setArgAlignment(unsigned ArgIndex, unsigned Alignment) {
+    setArgAlignment(ArgIndex, MaybeAlign(Alignment));
   }
-  void setDestAlignment(MaybeAlign Alignment) {
-    removeParamAttr(ARG_DEST, Attribute::Alignment);
+
+  void setArgAlignment(unsigned ArgIndex, MaybeAlign Alignment) {
+    removeParamAttr(ArgIndex, Attribute::Alignment);
     if (Alignment)
-      addParamAttr(ARG_DEST,
-                   Attribute::getWithAlignment(getContext(), *Alignment));
-  }
-  void setDestAlignment(Align Alignment) {
-    removeParamAttr(ARG_DEST, Attribute::Alignment);
-    addParamAttr(ARG_DEST,
-                 Attribute::getWithAlignment(getContext(), Alignment));
+      addParamAttr(ArgIndex, Attribute::getWithAlignment(
+                                 getContext(), *Alignment));
   }

-  void setLength(Value *L) {
-    assert(getLength()->getType() == L->getType() &&
-           "setLength called with value of wrong type!");
-    setArgOperand(ARG_LENGTH, L);
+  void setArgAlignment(unsigned ArgIndex, Align Alignment) {
+    removeParamAttr(ArgIndex, Attribute::Alignment);
+    addParamAttr(ArgIndex, Attribute::getWithAlignment(
+                               getContext(), Alignment));
   }
+
+public:
+  Value *getLength() const { return getRawArg(ARG_LENGTH); }
+
+  const Use &getLengthUse() const { return getRawArgUse(ARG_LENGTH); }
+
+  Use &getLengthUse() { return getRawArgUse(ARG_LENGTH); }
+
+  void setLength(Value *L) { setArg(ARG_LENGTH, L); }
 };

 /// Common base class for all memory transfer intrinsics.  Simply provides
 /// common methods.
 template <class BaseCL> class MemTransferBase : public BaseCL {
 private:
-  enum { ARG_SOURCE = 1 };
+  enum { ARG_DEST = 0, ARG_SOURCE = 1 };

 public:
   /// Return the arguments to the instruction.
   Value *getRawSource() const {
-    return const_cast<Value *>(BaseCL::getArgOperand(ARG_SOURCE));
+    return BaseCL::getRawArg(ARG_SOURCE);
   }
+
   const Use &getRawSourceUse() const {
-    return BaseCL::getArgOperandUse(ARG_SOURCE);
+    return BaseCL::getRawArgUse(ARG_SOURCE);
+  }
+
+  Use &getRawSourceUse() {
+    return BaseCL::getRawArgUse(ARG_SOURCE);
   }
-  Use &getRawSourceUse() { return BaseCL::getArgOperandUse(ARG_SOURCE); }

   /// This is just like getRawSource, but it strips off any cast
   /// instructions that feed it, giving the original input.  The returned
   /// value is guaranteed to be a pointer.
-  Value *getSource() const { return getRawSource()->stripPointerCasts(); }
+  Value *getSource() const { return BaseCL::getArg(ARG_SOURCE); }

   unsigned getSourceAddressSpace() const {
-    return cast<PointerType>(getRawSource()->getType())->getAddressSpace();
+    return BaseCL::getArgAddressSpace(ARG_SOURCE);
   }

   /// FIXME: Remove this function once transition to Align is over.
   /// Use getSourceAlign() instead.
   unsigned getSourceAlignment() const {
-    if (auto MA = BaseCL::getParamAlign(ARG_SOURCE))
-      return MA->value();
-    return 0;
+    return BaseCL::getArgAlignment(ARG_SOURCE);
   }

   MaybeAlign getSourceAlign() const {
-    return BaseCL::getParamAlign(ARG_SOURCE);
+    return BaseCL::getArgAlign(ARG_SOURCE);
   }

-  void setSource(Value *Ptr) {
-    assert(getRawSource()->getType() == Ptr->getType() &&
-           "setSource called with pointer of wrong type!");
-    BaseCL::setArgOperand(ARG_SOURCE, Ptr);
-  }
+  void setSource(Value *Ptr) { BaseCL::setArg(ARG_SOURCE, Ptr); }

   /// FIXME: Remove this function once transition to Align is over.
   /// Use the version that takes MaybeAlign instead of this one.
   void setSourceAlignment(unsigned Alignment) {
-    setSourceAlignment(MaybeAlign(Alignment));
+    BaseCL::setArgAlignment(ARG_SOURCE, Alignment);
   }
+
   void setSourceAlignment(MaybeAlign Alignment) {
-    BaseCL::removeParamAttr(ARG_SOURCE, Attribute::Alignment);
-    if (Alignment)
-      BaseCL::addParamAttr(ARG_SOURCE, Attribute::getWithAlignment(
-                                           BaseCL::getContext(), *Alignment));
+    BaseCL::setArgAlignment(ARG_SOURCE, Alignment);
   }
+
   void setSourceAlignment(Align Alignment) {
-    BaseCL::removeParamAttr(ARG_SOURCE, Attribute::Alignment);
-    BaseCL::addParamAttr(ARG_SOURCE, Attribute::getWithAlignment(
-                                         BaseCL::getContext(), Alignment));
+    BaseCL::setArgAlignment(ARG_SOURCE, Alignment);
   }
+
+  /// This is just like getRawDest, but it strips off any cast
+  /// instructions (including addrspacecast) that feed it, giving the
+  /// original input.  The returned value is guaranteed to be a pointer.
+  Value *getDest() const { return BaseCL::getArg(ARG_DEST); }
+
+  unsigned getDestAddressSpace() const {
+    return BaseCL::getArgAddressSpace(ARG_DEST);
+  }
+
+  /// FIXME: Remove this function once transition to Align is over.
+  /// Use getDestAlign() instead.
+  unsigned getDestAlignment() const {
+    return BaseCL::getArgAlignment(ARG_DEST);
+  }
+
+  MaybeAlign getDestAlign() const {
+    return BaseCL::getArgAlign(ARG_DEST);
+  }
+
+  /// Set the specified arguments of the instruction.
+  void setDest(Value *Ptr) {
+    BaseCL::setArg(ARG_DEST, Ptr);
+  }
+
+  /// FIXME: Remove this function once transition to Align is over.
+  /// Use the version that takes MaybeAlign instead of this one.
+  void setDestAlignment(unsigned Alignment) {
+    BaseCL::setArgAlignment(ARG_DEST, Alignment);
+  }
+
+  void setDestAlignment(MaybeAlign Alignment) {
+    BaseCL::setArgAlignment(ARG_DEST, Alignment);
+  }
+
+  void setDestAlignment(Align Alignment) {
+    BaseCL::setArgAlignment(ARG_DEST, Alignment);
+  }
+
+  Value *getRawDest() const { return BaseCL::getRawArg(ARG_DEST); }
+
+  const Use &getRawDestUse() const { return BaseCL::getRawArgUse(ARG_DEST); }
+
+  Use &getRawDestUse() { return BaseCL::getRawArgUse(ARG_DEST); }
 };
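The hunks above fold the per-operand accessor boilerplate into protected, index-based helpers on MemIntrinsicBase; each mixin then names only the operand indices it owns. For reference, a minimal standalone sketch of the same layering — the names here are invented to mirror the pattern and are not LLVM's actual classes:

#include <cassert>
#include <string>

// Stand-in for an intrinsic call operand.
struct Operand {
  std::string Type;
};

// Analogue of MemIntrinsicBase: raw, index-based access stays protected so
// that only the mixins can hand out named accessors.
class IntrinsicBase {
protected:
  Operand Args[3];

  Operand &getRawArg(unsigned ArgIndex) { return Args[ArgIndex]; }

  void setArg(unsigned ArgIndex, const Operand &Op) {
    assert(Args[ArgIndex].Type == Op.Type && "setArg: wrong operand type");
    Args[ArgIndex] = Op;
  }
};

// Analogue of MemTransferBase: gives the shared indices their names.
template <class BaseCL> class TransferBase : public BaseCL {
  enum { ARG_DEST = 0, ARG_SOURCE = 1 };

public:
  Operand &getDest() { return BaseCL::getRawArg(ARG_DEST); }
  Operand &getSource() { return BaseCL::getRawArg(ARG_SOURCE); }
  void setSource(const Operand &Op) { BaseCL::setArg(ARG_SOURCE, Op); }
};

// A concrete intrinsic type simply stacks the mixins it needs.
class TransferIntrinsic : public TransferBase<IntrinsicBase> {};

Because each mixin owns its operand indices, an intrinsic class picks up exactly the accessors that make sense for its signature and nothing more.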
 /// Common base class for all memset intrinsics.  Simply provides
 /// common methods.
 template <class BaseCL> class MemSetBase : public BaseCL {
 private:
-  enum { ARG_VALUE = 1 };
+  enum { ARG_DEST = 0, ARG_VALUE = 1 };

 public:
   Value *getValue() const {
-    return const_cast<Value *>(BaseCL::getArgOperand(ARG_VALUE));
+    return BaseCL::getRawArg(ARG_VALUE);
   }
-  const Use &getValueUse() const { return BaseCL::getArgOperandUse(ARG_VALUE); }
-  Use &getValueUse() { return BaseCL::getArgOperandUse(ARG_VALUE); }
-  void setValue(Value *Val) {
-    assert(getValue()->getType() == Val->getType() &&
-           "setValue called with value of wrong type!");
-    BaseCL::setArgOperand(ARG_VALUE, Val);
-  }
+  const Use &getValueUse() const { return BaseCL::getRawArgUse(ARG_VALUE); }
+
+  Use &getValueUse() { return BaseCL::getRawArgUse(ARG_VALUE); }
+
+  void setValue(Value *Val) { BaseCL::setArg(ARG_VALUE, Val); }
 };

 // The common base class for the atomic memset/memmove/memcpy intrinsics
@@ -784,7 +827,7 @@
 public:
   Value *getRawElementSizeInBytes() const {
-    return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
+    return getRawArg(ARG_ELEMENTSIZE);
   }

   ConstantInt *getElementSizeInBytesCst() const {
@@ -796,9 +839,7 @@
   }

   void setElementSizeInBytes(Constant *V) {
-    assert(V->getType() == Type::getInt8Ty(getContext()) &&
-           "setElementSizeInBytes called with value of wrong type!");
-    setArgOperand(ARG_ELEMENTSIZE, V);
+    setArg(ARG_ELEMENTSIZE, V);
   }

   static bool classof(const IntrinsicInst *I) {
@@ -816,9 +857,75 @@
   }
 };

+template <class BaseCL> class MemWriteIntrinsicBase : public BaseCL {
+private:
+  enum { ARG_DEST = 0 };
+
+public:
+  /// This is just like getRawDest, but it strips off any cast
+  /// instructions (including addrspacecast) that feed it, giving the
+  /// original input.  The returned value is guaranteed to be a pointer.
+  Value *getDest() const { return BaseCL::getArg(ARG_DEST); }
+
+  unsigned getDestAddressSpace() const {
+    return BaseCL::getArgAddressSpace(ARG_DEST);
+  }
+
+  /// FIXME: Remove this function once transition to Align is over.
+  /// Use getDestAlign() instead.
+  unsigned getDestAlignment() const {
+    return BaseCL::getArgAlignment(ARG_DEST);
+  }
+
+  MaybeAlign getDestAlign() const {
+    return BaseCL::getArgAlign(ARG_DEST);
+  }
+
+  /// Set the specified arguments of the instruction.
+  void setDest(Value *Ptr) { BaseCL::setArg(ARG_DEST, Ptr); }
+
+  /// FIXME: Remove this function once transition to Align is over.
+  /// Use the version that takes MaybeAlign instead of this one.
+  void setDestAlignment(unsigned Alignment) {
+    BaseCL::setArgAlignment(ARG_DEST, Alignment);
+  }
+
+  void setDestAlignment(MaybeAlign Alignment) {
+    BaseCL::setArgAlignment(ARG_DEST, Alignment);
+  }
+
+  void setDestAlignment(Align Alignment) {
+    BaseCL::setArgAlignment(ARG_DEST, Alignment);
+  }
+
+  Value *getRawDest() const { return BaseCL::getRawArg(ARG_DEST); }
+
+  const Use &getRawDestUse() const { return BaseCL::getRawArgUse(ARG_DEST); }
+
+  Use &getRawDestUse() { return BaseCL::getRawArgUse(ARG_DEST); }
+};
+
+class AtomicMemWriteIntrinsic
+    : public MemWriteIntrinsicBase<AtomicMemIntrinsic> {
+public:
+  static bool classof(const IntrinsicInst *I) {
+    switch (I->getIntrinsicID()) {
+    case Intrinsic::memcpy_element_unordered_atomic:
+    case Intrinsic::memset_element_unordered_atomic:
+    case Intrinsic::memmove_element_unordered_atomic:
+      return true;
+    default:
+      return false;
+    }
+  }
+  static bool classof(const Value *V) {
+    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+  }
+};
+
 /// This class represents atomic memset intrinsic
 // i.e. llvm.element.unordered.atomic.memset
-class AtomicMemSetInst : public MemSetBase<AtomicMemIntrinsic> {
+class AtomicMemSetInst : public MemSetBase<AtomicMemWriteIntrinsic> {
 public:
   static bool classof(const IntrinsicInst *I) {
     return I->getIntrinsicID() == Intrinsic::memset_element_unordered_atomic;
@@ -830,7 +937,7 @@

 // This class wraps the atomic memcpy/memmove intrinsics
 // i.e. llvm.element.unordered.atomic.memcpy/memmove
-class AtomicMemTransferInst : public MemTransferBase<AtomicMemIntrinsic> {
+class AtomicMemTransferInst : public MemTransferBase<AtomicMemWriteIntrinsic> {
 public:
   static bool classof(const IntrinsicInst *I) {
     switch (I->getIntrinsicID()) {
@@ -901,8 +1008,27 @@
   }
 };

+class MemWriteIntrinsic : public MemWriteIntrinsicBase<MemIntrinsic> {
+public:
+  // Methods for support type inquiry through isa, cast, and dyn_cast:
+  static bool classof(const IntrinsicInst *I) {
+    switch (I->getIntrinsicID()) {
+    case Intrinsic::memcpy:
+    case Intrinsic::memmove:
+    case Intrinsic::memset:
+    case Intrinsic::memcpy_inline:
+      return true;
+    default:
+      return false;
+    }
+  }
+  static bool classof(const Value *V) {
+    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+  }
+};
+
 /// This class wraps the llvm.memset intrinsic.
-class MemSetInst : public MemSetBase<MemIntrinsic> {
+class MemSetInst : public MemSetBase<MemWriteIntrinsic> {
 public:
   // Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const IntrinsicInst *I) {
@@ -914,7 +1040,7 @@
 };

 /// This class wraps the llvm.memcpy/memmove intrinsics.
-class MemTransferInst : public MemTransferBase<MemIntrinsic> {
+class MemTransferInst : public MemTransferBase<MemWriteIntrinsic> {
 public:
   // Methods for support type inquiry through isa, cast, and dyn_cast:
   static bool classof(const IntrinsicInst *I) {
@@ -1004,10 +1130,31 @@
   }
 };

+class AnyMemWriteIntrinsic : public MemWriteIntrinsicBase<AnyMemIntrinsic> {
+public:
+  static bool classof(const IntrinsicInst *I) {
+    switch (I->getIntrinsicID()) {
+    case Intrinsic::memcpy:
+    case Intrinsic::memcpy_inline:
+    case Intrinsic::memmove:
+    case Intrinsic::memset:
+    case Intrinsic::memcpy_element_unordered_atomic:
+    case Intrinsic::memmove_element_unordered_atomic:
+    case Intrinsic::memset_element_unordered_atomic:
+      return true;
+    default:
+      return false;
+    }
+  }
+  static bool classof(const Value *V) {
+    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+  }
+};
+
 /// This class represents any memset intrinsic
 // i.e. llvm.element.unordered.atomic.memset
 // and llvm.memset
-class AnyMemSetInst : public MemSetBase<AnyMemIntrinsic> {
+class AnyMemSetInst : public MemSetBase<AnyMemWriteIntrinsic> {
 public:
   static bool classof(const IntrinsicInst *I) {
     switch (I->getIntrinsicID()) {
@@ -1026,7 +1173,7 @@
 // This class wraps any memcpy/memmove intrinsics
 // i.e. llvm.element.unordered.atomic.memcpy/memmove
 // and llvm.memcpy/memmove
-class AnyMemTransferInst : public MemTransferBase<AnyMemIntrinsic> {
+class AnyMemTransferInst : public MemTransferBase<AnyMemWriteIntrinsic> {
 public:
   static bool classof(const IntrinsicInst *I) {
     switch (I->getIntrinsicID()) {
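With ARG_DEST moved out of MemIntrinsicBase, plain MemIntrinsic no longer exposes getRawDest()/getDest(); only the write-intrinsic classes introduced above do. Assuming the patch as posted, a call site that needs the written pointer would take this shape (the helper name is hypothetical; the APIs are the ones defined in this header):

#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Hypothetical helper: the pointer written by a memory intrinsic, or null.
// AnyMemWriteIntrinsic covers llvm.memset/memcpy/memmove (plus memcpy.inline)
// as well as the element-wise unordered-atomic variants.
static Value *getWrittenPointer(Instruction *Inst) {
  if (auto *MWI = dyn_cast<AnyMemWriteIntrinsic>(Inst))
    return MWI->getRawDest();
  return nullptr;
}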
diff --git a/llvm/include/llvm/Transforms/Utils/VNCoercion.h b/llvm/include/llvm/Transforms/Utils/VNCoercion.h
--- a/llvm/include/llvm/Transforms/Utils/VNCoercion.h
+++ b/llvm/include/llvm/Transforms/Utils/VNCoercion.h
@@ -25,7 +25,7 @@
 class Constant;
 class StoreInst;
 class LoadInst;
-class MemIntrinsic;
+class MemWriteIntrinsic;
 class Instruction;
 class IRBuilderBase;
 class Value;
@@ -68,7 +68,8 @@
 /// On success, it returns the offset into DepMI that extraction would start.
 /// On failure, it returns -1.
 int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
-                                     MemIntrinsic *DepMI, const DataLayout &DL);
+                                     MemWriteIntrinsic *DepMI,
+                                     const DataLayout &DL);

 /// If analyzeLoadFromClobberingStore returned an offset, this function can be
 /// used to actually perform the extraction of the bits from the store. It
@@ -95,12 +96,13 @@
 /// used to actually perform the extraction of the bits from the memory
 /// intrinsic. It inserts instructions to do so at InsertPt, and returns the
 /// extracted value.
-Value *getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
+Value *getMemInstValueForLoad(MemWriteIntrinsic *SrcInst, unsigned Offset,
                               Type *LoadTy, Instruction *InsertPt,
                               const DataLayout &DL);

 // This is the same as getStoreValueForLoad, except it performs no insertion.
 // It returns nullptr if it cannot produce a constant.
-Constant *getConstantMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
+Constant *getConstantMemInstValueForLoad(MemWriteIntrinsic *SrcInst,
+                                         unsigned Offset,
                                          Type *LoadTy, const DataLayout &DL);
 }
 }
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -640,15 +640,15 @@
     AddNonNullPointer(L->getPointerOperand(), PtrSet);
   } else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
     AddNonNullPointer(S->getPointerOperand(), PtrSet);
-  } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
-    if (MI->isVolatile()) return;
+  } else if (MemWriteIntrinsic *MWI = dyn_cast<MemWriteIntrinsic>(I)) {
+    if (MWI->isVolatile()) return;
     // FIXME: check whether it has a valuerange that excludes zero?
-    ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
+    ConstantInt *Len = dyn_cast<ConstantInt>(MWI->getLength());
     if (!Len || Len->isZero()) return;

-    AddNonNullPointer(MI->getRawDest(), PtrSet);
-    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
+    AddNonNullPointer(MWI->getRawDest(), PtrSet);
+    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MWI))
       AddNonNullPointer(MTI->getRawSource(), PtrSet);
   }
 }
diff --git a/llvm/lib/Analysis/MemoryLocation.cpp b/llvm/lib/Analysis/MemoryLocation.cpp
--- a/llvm/lib/Analysis/MemoryLocation.cpp
+++ b/llvm/lib/Analysis/MemoryLocation.cpp
@@ -123,25 +123,25 @@
   return MemoryLocation(MTI->getRawSource(), Size, AATags);
 }

-MemoryLocation MemoryLocation::getForDest(const MemIntrinsic *MI) {
-  return getForDest(cast<AnyMemIntrinsic>(MI));
+MemoryLocation MemoryLocation::getForDest(const MemWriteIntrinsic *MI) {
+  return getForDest(cast<AnyMemWriteIntrinsic>(MI));
 }

-MemoryLocation MemoryLocation::getForDest(const AtomicMemIntrinsic *MI) {
-  return getForDest(cast<AnyMemIntrinsic>(MI));
+MemoryLocation MemoryLocation::getForDest(const AtomicMemWriteIntrinsic *AMI) {
+  return getForDest(cast<AnyMemWriteIntrinsic>(AMI));
 }

-MemoryLocation MemoryLocation::getForDest(const AnyMemIntrinsic *MI) {
+MemoryLocation MemoryLocation::getForDest(const AnyMemWriteIntrinsic *MWI) {
   auto Size = LocationSize::afterPointer();
-  if (ConstantInt *C = dyn_cast<ConstantInt>(MI->getLength()))
+  if (ConstantInt *C = dyn_cast<ConstantInt>(MWI->getLength()))
     Size = LocationSize::precise(C->getValue().getZExtValue());

   // memcpy/memmove can have AA tags. For memcpy, they apply
   // to both the source and the destination.
   AAMDNodes AATags;
-  MI->getAAMetadata(AATags);
+  MWI->getAAMetadata(AATags);

-  return MemoryLocation(MI->getRawDest(), Size, AATags);
+  return MemoryLocation(MWI->getRawDest(), Size, AATags);
 }

 MemoryLocation MemoryLocation::getForArgument(const CallBase *Call,
diff --git a/llvm/lib/Analysis/StackSafetyAnalysis.cpp b/llvm/lib/Analysis/StackSafetyAnalysis.cpp
--- a/llvm/lib/Analysis/StackSafetyAnalysis.cpp
+++ b/llvm/lib/Analysis/StackSafetyAnalysis.cpp
@@ -306,8 +306,8 @@
   if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
     if (MTI->getRawSource() != U && MTI->getRawDest() != U)
       return ConstantRange::getEmpty(PointerSize);
-  } else {
-    if (MI->getRawDest() != U)
+  } else if (const auto *MWI = dyn_cast<MemWriteIntrinsic>(MI)) {
+    if (MWI->getRawDest() != U)
       return ConstantRange::getEmpty(PointerSize);
   }
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2139,12 +2139,12 @@
   }
   // If this is a memcpy (or similar) then we may be able to improve the
   // alignment
-  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
-    Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
-    MaybeAlign MIDestAlign = MI->getDestAlign();
-    if (!MIDestAlign || DestAlign > *MIDestAlign)
-      MI->setDestAlignment(DestAlign);
-    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
+  if (MemWriteIntrinsic *MWI = dyn_cast<MemWriteIntrinsic>(CI)) {
+    Align DestAlign = getKnownAlignment(MWI->getDest(), *DL);
+    MaybeAlign MWIDestAlign = MWI->getDestAlign();
+    if (!MWIDestAlign || DestAlign > *MWIDestAlign)
+      MWI->setDestAlignment(DestAlign);
+    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MWI)) {
       MaybeAlign MTISrcAlign = MTI->getSourceAlign();
       Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
       if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
diff --git a/llvm/lib/CodeGen/SafeStack.cpp b/llvm/lib/CodeGen/SafeStack.cpp
--- a/llvm/lib/CodeGen/SafeStack.cpp
+++ b/llvm/lib/CodeGen/SafeStack.cpp
@@ -267,8 +267,8 @@
   if (auto MTI = dyn_cast<MemTransferInst>(MI)) {
     if (MTI->getRawSource() != U && MTI->getRawDest() != U)
       return true;
-  } else {
-    if (MI->getRawDest() != U)
+  } else if (auto MWI = dyn_cast<MemWriteIntrinsic>(MI)) {
+    if (MWI->getRawDest() != U)
       return true;
   }
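The getForDest() overloads now take the write-intrinsic types, so callers change their dyn_cast target and nothing else. A sketch of the resulting call-site shape (the helper is hypothetical; it mirrors the DeadStoreElimination hunks later in this patch):

#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Hypothetical helper: the location a memset/memcpy/memmove writes, if any.
static Optional<MemoryLocation> getWriteLocation(Instruction *I) {
  if (auto *MWI = dyn_cast<MemWriteIntrinsic>(I))
    return MemoryLocation::getForDest(MWI);
  return None;
}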
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -3930,12 +3930,12 @@
     Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1),
                       CI->getArgOperand(2), CI->getArgOperand(4)};
     NewCall = Builder.CreateCall(NewFn, Args);
-    auto *MemCI = cast<MemIntrinsic>(NewCall);
+    auto *MemWriteIntr = cast<MemWriteIntrinsic>(NewCall);
     // All mem intrinsics support dest alignment.
     const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
-    MemCI->setDestAlignment(Align->getMaybeAlignValue());
+    MemWriteIntr->setDestAlignment(Align->getMaybeAlignValue());
     // Memcpy/Memmove also support source alignment.
-    if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))
+    if (auto *MTI = dyn_cast<MemTransferInst>(MemWriteIntr))
       MTI->setSourceAlignment(Align->getMaybeAlignValue());
     break;
   }
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -4671,28 +4671,28 @@
   case Intrinsic::memcpy_inline:
   case Intrinsic::memmove:
   case Intrinsic::memset: {
-    const auto *MI = cast<MemIntrinsic>(&Call);
+    const auto *MWI = cast<MemWriteIntrinsic>(&Call);
     auto IsValidAlignment = [&](unsigned Alignment) -> bool {
       return Alignment == 0 || isPowerOf2_32(Alignment);
     };
-    Assert(IsValidAlignment(MI->getDestAlignment()),
+    Assert(IsValidAlignment(MWI->getDestAlignment()),
            "alignment of arg 0 of memory intrinsic must be 0 or a power of 2",
            Call);
-    if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
+    if (const auto *MTI = dyn_cast<MemTransferInst>(MWI)) {
       Assert(IsValidAlignment(MTI->getSourceAlignment()),
              "alignment of arg 1 of memory intrinsic must be 0 or a power of 2",
              Call);
     }
     break;
   }
   case Intrinsic::memcpy_element_unordered_atomic:
   case Intrinsic::memmove_element_unordered_atomic:
   case Intrinsic::memset_element_unordered_atomic: {
-    const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
+    const auto *AMWI = cast<AtomicMemWriteIntrinsic>(&Call);
     ConstantInt *ElementSizeCI =
-        cast<ConstantInt>(AMI->getRawElementSizeInBytes());
+        cast<ConstantInt>(AMWI->getRawElementSizeInBytes());
     const APInt &ElementSizeVal = ElementSizeCI->getValue();
     Assert(ElementSizeVal.isPowerOf2(),
            "element size of the element-wise atomic memory intrinsic "
@@ -4702,10 +4702,10 @@
     auto IsValidAlignment = [&](uint64_t Alignment) {
       return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
     };
-    uint64_t DstAlignment = AMI->getDestAlignment();
+    uint64_t DstAlignment = AMWI->getDestAlignment();
     Assert(IsValidAlignment(DstAlignment),
            "incorrect alignment of the destination argument", Call);
-    if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
+    if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMWI)) {
       uint64_t SrcAlignment = AMT->getSourceAlignment();
       Assert(IsValidAlignment(SrcAlignment),
              "incorrect alignment of the source argument", Call);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPerfHintAnalysis.cpp
@@ -132,8 +132,8 @@
   if (auto AI = dyn_cast<AtomicRMWInst>(Inst)) {
    return AI->getPointerOperand();
   }
-  if (auto MI = dyn_cast<AnyMemIntrinsic>(Inst)) {
-    return MI->getRawDest();
+  if (auto MWI = dyn_cast<AnyMemWriteIntrinsic>(Inst)) {
+    return MWI->getRawDest();
   }

   return nullptr;
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -349,9 +349,10 @@
       GEP->eraseFromParent();
       Changed = true;
     }
-  } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
-    if (MI->getRawDest() == V) {
-      MI->eraseFromParent();
+  } else if (MemWriteIntrinsic *MWI = dyn_cast<MemWriteIntrinsic>(U)) {
+    // memset/cpy/mv
+    if (MWI->getRawDest() == V) {
+      MWI->eraseFromParent();
       Changed = true;
     }
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2613,8 +2613,8 @@
       case Intrinsic::memmove:
       case Intrinsic::memcpy:
       case Intrinsic::memset: {
-        MemIntrinsic *MI = cast<MemIntrinsic>(II);
-        if (MI->isVolatile() || MI->getRawDest() != PI)
+        MemWriteIntrinsic *MWI = cast<MemWriteIntrinsic>(II);
+        if (MWI->isVolatile() || MWI->getRawDest() != PI)
          return false;
        LLVM_FALLTHROUGH;
      }
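Several hunks above (StackSafetyAnalysis, SafeStack, GlobalOpt) share one shape: a trailing `else` that assumed every non-transfer mem intrinsic writes through operand 0 becomes an explicit `else if` with a dyn_cast, since only the write-intrinsic classes still expose getRawDest(). A self-contained sketch of that pattern — the function and its safety semantics are hypothetical, not the actual pass code:

#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Hypothetical check: does this mem intrinsic leave the value U untouched?
static bool accessLeavesValueUntouched(const MemIntrinsic *MI, const Value *U) {
  if (const auto *MTI = dyn_cast<MemTransferInst>(MI))
    // Transfers read the source and write the destination.
    return MTI->getRawSource() != U && MTI->getRawDest() != U;
  if (const auto *MWI = dyn_cast<MemWriteIntrinsic>(MI))
    // Any other write intrinsic only touches its destination.
    return MWI->getRawDest() != U;
  return false; // Unknown mem intrinsic: conservatively assume it touches U.
}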
diff --git a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
--- a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -278,22 +278,22 @@
       SI->setAlignment(NewAlignment);
       ++NumStoreAlignChanged;
     }
-  } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) {
+  } else if (MemWriteIntrinsic *MWI = dyn_cast<MemWriteIntrinsic>(J)) {
     if (!isValidAssumeForContext(ACall, J, DT))
       continue;
     Align NewDestAlignment =
-        getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MI->getDest(), SE);
+        getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MWI->getDest(), SE);

     LLVM_DEBUG(dbgs() << "\tmem inst: " << DebugStr(NewDestAlignment)
                       << "\n";);
-    if (NewDestAlignment > *MI->getDestAlign()) {
-      MI->setDestAlignment(NewDestAlignment);
+    if (NewDestAlignment > *MWI->getDestAlign()) {
+      MWI->setDestAlignment(NewDestAlignment);
       ++NumMemIntAlignChanged;
     }

     // For memory transfers, there is also a source alignment that
     // can be set.
-    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
+    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MWI)) {
       Align NewSrcAlignment =
           getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MTI->getSource(), SE);
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -214,8 +214,8 @@
     return MemoryLocation::get(SI);

   // memcpy/memmove/memset.
-  if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst))
-    return MemoryLocation::getForDest(MI);
+  if (auto *MWI = dyn_cast<AnyMemWriteIntrinsic>(Inst))
+    return MemoryLocation::getForDest(MWI);

   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
     switch (II->getIntrinsicID()) {
@@ -578,7 +578,7 @@
 static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierStart,
                          uint64_t &EarlierSize, int64_t LaterStart,
                          uint64_t LaterSize, bool IsOverwriteEnd) {
-  auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
+  auto *EarlierIntrinsic = cast<AnyMemWriteIntrinsic>(EarlierWrite);
   Align PrefAlign = EarlierIntrinsic->getDestAlign().valueOrOne();

   // We assume that memset/memcpy operates in chunks of the "largest" native
@@ -1104,8 +1104,8 @@
   if (!I->mayWriteToMemory())
     return None;

-  if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
-    return {MemoryLocation::getForDest(MTI)};
+  if (auto *MWI = dyn_cast<AnyMemWriteIntrinsic>(I))
+    return {MemoryLocation::getForDest(MWI)};

   if (auto *CB = dyn_cast<CallBase>(I)) {
     // If the functions may write to memory we do not know about, bail out.
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -199,9 +199,9 @@
     return Res;
   }

-  static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
+  static AvailableValue getMI(MemWriteIntrinsic *MWI, unsigned Offset = 0) {
     AvailableValue Res;
-    Res.Val.setPointer(MI);
+    Res.Val.setPointer(MWI);
     Res.Val.setInt(MemIntrin);
     Res.Offset = Offset;
     return Res;
   }
@@ -238,9 +238,9 @@
     return cast<LoadInst>(Val.getPointer());
   }

-  MemIntrinsic *getMemIntrinValue() const {
+  MemWriteIntrinsic *getMemIntrinValue() const {
     assert(isMemIntrinValue() && "Wrong accessor");
-    return cast<MemIntrinsic>(Val.getPointer());
+    return cast<MemWriteIntrinsic>(Val.getPointer());
   }

   /// Emit code at the specified insertion point to adjust the value defined
@@ -1061,7 +1061,7 @@
   // If the clobbering value is a memset/memcpy/memmove, see if we can
   // forward a value on from it.
-  if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
+  if (MemWriteIntrinsic *DepMI = dyn_cast<MemWriteIntrinsic>(DepInst)) {
     if (Address && !Load->isAtomic()) {
       int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address,
                                                     DepMI, DL);
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -450,12 +450,12 @@
     PushPtrOperand(RMW->getPointerOperand());
   else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I))
     PushPtrOperand(CmpX->getPointerOperand());
-  else if (auto *MI = dyn_cast<MemIntrinsic>(&I)) {
+  else if (auto *MWI = dyn_cast<MemWriteIntrinsic>(&I)) {
     // For memset/memcpy/memmove, any pointer operand can be replaced.
-    PushPtrOperand(MI->getRawDest());
+    PushPtrOperand(MWI->getRawDest());

     // Handle 2nd operand for memcpy/memmove.
-    if (auto *MTI = dyn_cast<MemTransferInst>(MI))
+    if (auto *MTI = dyn_cast<MemTransferInst>(MWI))
       PushPtrOperand(MTI->getRawSource());
   } else if (auto *II = dyn_cast<IntrinsicInst>(&I))
     collectRewritableIntrinsicOperands(II, PostorderStack, Visited);
diff --git a/llvm/lib/Transforms/Scalar/NewGVN.cpp b/llvm/lib/Transforms/Scalar/NewGVN.cpp
--- a/llvm/lib/Transforms/Scalar/NewGVN.cpp
+++ b/llvm/lib/Transforms/Scalar/NewGVN.cpp
@@ -1468,7 +1468,7 @@
         return createConstantExpression(PossibleConstant);
       }
     }
-  } else if (auto *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
+  } else if (auto *DepMI = dyn_cast<MemWriteIntrinsic>(DepInst)) {
     int Offset = analyzeLoadFromClobberingMemInst(LoadType, LoadPtr, DepMI, DL);
     if (Offset >= 0) {
       if (auto *PossibleConstant =
diff --git a/llvm/lib/Transforms/Utils/VNCoercion.cpp b/llvm/lib/Transforms/Utils/VNCoercion.cpp
--- a/llvm/lib/Transforms/Utils/VNCoercion.cpp
+++ b/llvm/lib/Transforms/Utils/VNCoercion.cpp
@@ -365,29 +365,30 @@
 }

 int analyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
-                                     MemIntrinsic *MI, const DataLayout &DL) {
+                                     MemWriteIntrinsic *MWI,
+                                     const DataLayout &DL) {
   // If the mem operation is a non-constant size, we can't handle it.
-  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
+  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MWI->getLength());
   if (!SizeCst)
     return -1;
   uint64_t MemSizeInBits = SizeCst->getZExtValue() * 8;

   // If this is memset, we just need to see if the offset is valid in the size
   // of the memset..
-  if (MI->getIntrinsicID() == Intrinsic::memset) {
+  if (MWI->getIntrinsicID() == Intrinsic::memset) {
     if (DL.isNonIntegralPointerType(LoadTy->getScalarType())) {
-      auto *CI = dyn_cast<ConstantInt>(cast<MemSetInst>(MI)->getValue());
+      auto *CI = dyn_cast<ConstantInt>(cast<MemSetInst>(MWI)->getValue());
       if (!CI || !CI->isZero())
         return -1;
     }
-    return analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
+    return analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MWI->getDest(),
                                           MemSizeInBits, DL);
   }

   // If we have a memcpy/memmove, the only case we can handle is if this is a
   // copy from constant memory.  In that case, we can read directly from the
   // constant memory.
-  MemTransferInst *MTI = cast<MemTransferInst>(MI);
+  MemTransferInst *MTI = cast<MemTransferInst>(MWI);

   Constant *Src = dyn_cast<Constant>(MTI->getSource());
   if (!Src)
@@ -398,7 +399,7 @@
     return -1;

   // See if the access is within the bounds of the transfer.
-  int Offset = analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
+  int Offset = analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MWI->getDest(),
                                               MemSizeInBits, DL);
   if (Offset == -1)
     return Offset;
@@ -543,7 +544,7 @@
 }

 template <class T, class HelperClass>
-T *getMemInstValueForLoadHelper(MemIntrinsic *SrcInst, unsigned Offset,
+T *getMemInstValueForLoadHelper(MemWriteIntrinsic *SrcInst, unsigned Offset,
                                 Type *LoadTy, HelperClass &Helper,
                                 const DataLayout &DL) {
   LLVMContext &Ctx = LoadTy->getContext();
@@ -601,7 +602,7 @@
 /// This function is called when we have a
 /// memdep query of a load that ends up being a clobbering mem intrinsic.
-Value *getMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
+Value *getMemInstValueForLoad(MemWriteIntrinsic *SrcInst, unsigned Offset,
                               Type *LoadTy, Instruction *InsertPt,
                               const DataLayout &DL) {
   IRBuilder<> Builder(InsertPt);
@@ -609,7 +610,8 @@
                                       LoadTy, Builder, DL);
 }

-Constant *getConstantMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
+Constant *getConstantMemInstValueForLoad(MemWriteIntrinsic *SrcInst,
+                                         unsigned Offset,
                                          Type *LoadTy, const DataLayout &DL) {
   // The only case analyzeLoadFromClobberingMemInst cannot be converted to a
   // constant is when it's a memset of a non-constant.
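End to end, the VNCoercion signature changes keep the GVN/NewGVN flow identical; only the type of the clobbering intrinsic narrows. A sketch of that flow under the updated signatures (a hypothetical wrapper around the functions declared in VNCoercion.h, not code from the patch):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
using namespace llvm;
using namespace llvm::VNCoercion;

// Hypothetical wrapper: forward a value from a clobbering memset/memcpy to
// Load, or return null if the bits cannot be recovered.
static Value *tryForwardFromMemWrite(LoadInst *Load, Instruction *DepInst,
                                     const DataLayout &DL) {
  auto *DepMI = dyn_cast<MemWriteIntrinsic>(DepInst);
  if (!DepMI)
    return nullptr;
  int Offset = analyzeLoadFromClobberingMemInst(
      Load->getType(), Load->getPointerOperand(), DepMI, DL);
  if (Offset < 0)
    return nullptr;
  // Materialize the forwarded bits right before the load.
  return getMemInstValueForLoad(DepMI, static_cast<unsigned>(Offset),
                                Load->getType(), Load, DL);
}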