Index: include/llvm/Analysis/TargetTransformInfo.h
===================================================================
--- include/llvm/Analysis/TargetTransformInfo.h
+++ include/llvm/Analysis/TargetTransformInfo.h
@@ -39,20 +39,6 @@
 class User;
 class Value;
 
-/// \brief Information about a load/store intrinsic defined by the target.
-struct MemIntrinsicInfo {
-  MemIntrinsicInfo()
-      : ReadMem(false), WriteMem(false), Vol(false), MatchingId(0),
-        NumMemRefs(0), PtrVal(nullptr) {}
-  bool ReadMem;
-  bool WriteMem;
-  bool Vol;
-  // Same Id is set by the target for corresponding load/store intrinsics.
-  unsigned short MatchingId;
-  int NumMemRefs;
-  Value *PtrVal;
-};
-
 /// \brief This pass provides access to the codegen interfaces that are needed
 /// for IR-level transformations.
 class TargetTransformInfo {
@@ -476,19 +462,40 @@
   /// any callee-saved registers, so would require a spill and fill.
   unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;
 
-  /// \returns True if the intrinsic is a supported memory intrinsic.  Info
-  /// will contain additional information - whether the intrinsic may write
-  /// or read to memory, volatility and the pointer.  Info is undefined
-  /// if false is returned.
-  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;
+  /// \returns True if the target intrinsic behaves like a load instruction.
+  /// The intrinsic must have exactly one memory operand.
+  bool isTargetIntrinsicLikeLoad(const IntrinsicInst *II) const;
+
+  /// \returns True if the target intrinsic behaves like a store instruction.
+  /// The intrinsic must have exactly one memory operand.
+  bool isTargetIntrinsicLikeStore(const IntrinsicInst *II) const;
+
+  /// \returns True if the target memory intrinsic results in atomic memory
+  /// access.
+  bool isTargetIntrinsicAtomic(const IntrinsicInst *II) const;
+
+  /// \returns True if the target memory intrinsic results in volatile memory
+  /// access.
+  bool isTargetIntrinsicVolatile(const IntrinsicInst *II) const;
 
   /// \returns A value which is the result of the given memory intrinsic.  New
   /// instructions may be created to extract the result from the given intrinsic
   /// memory operation.  Returns nullptr if the target cannot create a result
   /// from the given intrinsic.
-  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
-                                           Type *ExpectedType) const;
+  Value *getOrCreateResultFromTargetIntrinsic(IntrinsicInst *II,
+                                              const Type *ExpectedType) const;
+
+  /// \returns The pointer operand of the target memory intrinsic.
+  Value *getTargetIntrinsicPointerOperand(const IntrinsicInst *II) const;
+
+  /// \returns The Id indicating compatibility with another target memory
+  /// intrinsic.
+  /// The matching Id for a store memory intrinsic is the same as the matching
+  /// Id for its mirror load intrinsic.
+  unsigned getTargetIntrinsicMatchingId(const IntrinsicInst *II) const;
+  /// \returns True if the target memory intrinsic may read memory.
+  bool mayTargetIntrinsicReadFromMemory(const IntrinsicInst *II) const;
 
   /// @}
 
 private:
@@ -578,10 +585,16 @@
   virtual unsigned getNumberOfParts(Type *Tp) = 0;
   virtual unsigned getAddressComputationCost(Type *Ty, bool IsComplex) = 0;
   virtual unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0;
-  virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst,
-                                  MemIntrinsicInfo &Info) = 0;
-  virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
-                                                   Type *ExpectedType) = 0;
+  virtual bool isTargetIntrinsicLikeLoad(const IntrinsicInst *II) = 0;
+  virtual bool isTargetIntrinsicLikeStore(const IntrinsicInst *II) = 0;
+  virtual bool isTargetIntrinsicAtomic(const IntrinsicInst *II) = 0;
+  virtual bool isTargetIntrinsicVolatile(const IntrinsicInst *II) = 0;
+  virtual Value *
+  getOrCreateResultFromTargetIntrinsic(IntrinsicInst *II,
+                                       const Type *ExpectedType) = 0;
+  virtual Value *getTargetIntrinsicPointerOperand(const IntrinsicInst *II) = 0;
+  virtual unsigned getTargetIntrinsicMatchingId(const IntrinsicInst *II) = 0;
+  virtual bool mayTargetIntrinsicReadFromMemory(const IntrinsicInst *II) = 0;
 };
 
 template <typename T>
@@ -745,13 +758,31 @@
   unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) override {
     return Impl.getCostOfKeepingLiveOverCall(Tys);
   }
-  bool getTgtMemIntrinsic(IntrinsicInst *Inst,
-                          MemIntrinsicInfo &Info) override {
-    return Impl.getTgtMemIntrinsic(Inst, Info);
+  bool isTargetIntrinsicLikeLoad(const IntrinsicInst *II) override {
+    return Impl.isTargetIntrinsicLikeLoad(II);
+  }
+  bool isTargetIntrinsicLikeStore(const IntrinsicInst *II) override {
+    return Impl.isTargetIntrinsicLikeStore(II);
+  }
+  bool isTargetIntrinsicAtomic(const IntrinsicInst *II) override {
+    return Impl.isTargetIntrinsicAtomic(II);
+  }
+  bool isTargetIntrinsicVolatile(const IntrinsicInst *II) override {
+    return Impl.isTargetIntrinsicVolatile(II);
+  }
+  Value *
+  getOrCreateResultFromTargetIntrinsic(IntrinsicInst *II,
+                                       const Type *ExpectedType) override {
+    return Impl.getOrCreateResultFromTargetIntrinsic(II, ExpectedType);
+  }
+  Value *getTargetIntrinsicPointerOperand(const IntrinsicInst *II) override {
+    return Impl.getTargetIntrinsicPointerOperand(II);
+  }
+  unsigned getTargetIntrinsicMatchingId(const IntrinsicInst *II) override {
+    return Impl.getTargetIntrinsicMatchingId(II);
   }
-  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
-                                           Type *ExpectedType) override {
-    return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
+  bool mayTargetIntrinsicReadFromMemory(const IntrinsicInst *II) override {
+    return Impl.mayTargetIntrinsicReadFromMemory(II);
   }
 };
 
Index: include/llvm/Analysis/TargetTransformInfoImpl.h
===================================================================
--- include/llvm/Analysis/TargetTransformInfoImpl.h
+++ include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -21,6 +21,7 @@
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Operator.h"
 #include "llvm/IR/Type.h"
+#include "llvm/Support/raw_ostream.h"
 
 namespace llvm {
 
@@ -315,14 +316,46 @@
 
   unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { return 0; }
 
-  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) {
-    return false;
+  bool isTargetIntrinsicLikeLoad(const IntrinsicInst *II) { return false; }
+
+  bool isTargetIntrinsicLikeStore(const IntrinsicInst *II) { return false; }
+
+  bool isTargetIntrinsicAtomic(const IntrinsicInst *II) {
+    assert(false &&
+           "Target does not support querying of target memory intrinsics.");
+    return true;
+  }
+
+  bool isTargetIntrinsicVolatile(const IntrinsicInst *II) {
+    assert(false &&
+           "Target does not support querying of target memory intrinsics.");
+    return true;
+  }
+
+  Value *getOrCreateResultFromTargetIntrinsic(IntrinsicInst *II,
+                                              const Type *ExpectedType) {
+    assert(false &&
+           "Target does not support querying of target memory intrinsics.");
+    return nullptr;
   }
 
-  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
-                                           Type *ExpectedType) {
+  Value *getTargetIntrinsicPointerOperand(const IntrinsicInst *II) {
+    assert(false &&
+           "Target does not support querying of target memory intrinsics.");
     return nullptr;
   }
+
+  unsigned getTargetIntrinsicMatchingId(const IntrinsicInst *II) {
+    assert(false &&
+           "Target does not support querying of target memory intrinsics.");
+    return 0;
+  }
+
+  bool mayTargetIntrinsicReadFromMemory(const IntrinsicInst *II) {
+    assert(false &&
+           "Target does not support querying of target memory intrinsics.");
+    return true;
+  }
 };
 
 /// \brief CRTP base class for use as a mix-in that aids implementing
Index: include/llvm/IR/LoadStoreSite.h
===================================================================
--- /dev/null
+++ include/llvm/IR/LoadStoreSite.h
@@ -0,0 +1,169 @@
+//===- LoadStoreSite.h - Abstract load and store instructions --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LoadSite and StoreSite classes, which provide a
+// common interface for dealing with regular load and store instructions as
+// well as target-specific intrinsic load/store instructions.
+//
+// Example use:
+//   if (LoadSite LS = LoadSite(Inst, TTI)) {
+//     const Value *Ptr = LS.getPointerOperand();
+//     ...
+//   }
+//
+#ifndef LLVM_IR_LOADSTORESITE_H
+#define LLVM_IR_LOADSTORESITE_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+
+namespace llvm {
+
+template <typename Derived, typename InstrTy, typename ValTy,
+          typename NativeInstr, typename IntrinsicInstTy>
+class LoadStoreSiteBase {
+  // The bool bit is true if I is holding a pointer to NativeInstr, and false
+  // if I is holding a pointer to IntrinsicInst.  When I is holding nullptr,
+  // the bool bit is ignored.
+  PointerIntPair<InstrTy *, 1, bool> I;
+  const TargetTransformInfo *TTI;
+
+  void setInstruction(ValTy *V) {
+    if (NativeInstr *Inst = dyn_cast<NativeInstr>(V)) {
+      I.setPointer(Inst);
+      I.setInt(true);
+    } else if (IntrinsicInstTy *II = dyn_cast<IntrinsicInstTy>(V)) {
+      if (Derived::isTargetIntrinsicSupported(II, this->TTI))
+        I.setPointer(II);
+      I.setInt(false);
+    }
+  }
+  bool isTargetIntrinsic() const {
+    assert(I.getPointer() && "Instruction not set");
+    return !I.getInt();
+  }
+
+public:
+  LoadStoreSiteBase(ValTy *V, const TargetTransformInfo &TTI)
+      : I(nullptr, false), TTI(&TTI) {
+    if (V != nullptr)
+      setInstruction(V);
+  }
+
+  /// isSimple - true if the load/store instruction or intrinsic is neither
+  /// volatile nor atomic.
+  bool isSimple() const {
+    InstrTy *Inst = I.getPointer();
+    if (isTargetIntrinsic())
+      return !TTI->isTargetIntrinsicAtomic(cast<IntrinsicInstTy>(Inst)) &&
+             !TTI->isTargetIntrinsicVolatile(cast<IntrinsicInstTy>(Inst));
+    return cast<NativeInstr>(Inst)->isSimple();
+  }
+
+  /// getPointerOperand - retrieve the memory location accessed by the
+  /// load/store instruction or intrinsic.
+  const Value *getPointerOperand() const {
+    InstrTy *Inst = I.getPointer();
+    if (isTargetIntrinsic())
+      return TTI->getTargetIntrinsicPointerOperand(cast<IntrinsicInstTy>(Inst));
+    return cast<NativeInstr>(Inst)->getPointerOperand();
+  }
+
+  LoadStoreSiteBase &operator=(InstrTy *Inst) {
+    I.setPointer(nullptr);
+    I.setInt(false);
+
+    if (Inst == nullptr)
+      return *this;
+
+    setInstruction(Inst);
+    return *this;
+  }
+
+  explicit operator bool() const { return I.getPointer() != nullptr; }
+  InstrTy *operator->() const { return I.getPointer(); }
+  InstrTy *getInstruction() const { return I.getPointer(); }
+
+  bool mayReadFromMemory() const {
+    InstrTy *Inst = I.getPointer();
+    if (isTargetIntrinsic())
+      return TTI->mayTargetIntrinsicReadFromMemory(cast<IntrinsicInstTy>(Inst));
+    return Inst->mayReadFromMemory();
+  }
+
+  /// isCompatible - true if Other is a compatible load or store instruction.
+  /// Two load or store instructions are compatible if they are the same kind
+  /// of instruction.  Given a load instruction, a compatible store instruction
+  /// is one that mirrors the load.
+  bool isCompatible(const Value *Other) const {
+    if (!isTargetIntrinsic()) {
+      if (const LoadInst *LI = dyn_cast<LoadInst>(Other))
+        return getPointerOperand()->getType() ==
+               LI->getPointerOperand()->getType();
+      else if (const StoreInst *SI = dyn_cast<StoreInst>(Other))
+        return getPointerOperand()->getType() ==
+               SI->getPointerOperand()->getType();
+      return false;
+    }
+    if (isa<IntrinsicInst>(Other)) {
+      assert(isa<IntrinsicInstTy>(I.getPointer()) && "Expecting intrinsic");
+      if (TTI->getTargetIntrinsicMatchingId(cast<IntrinsicInst>(Other)) ==
+          TTI->getTargetIntrinsicMatchingId(
+              cast<IntrinsicInstTy>(I.getPointer())))
+        return true;
+      return false;
+    }
+    return false;
+  }
+
+  /// MatchInstructionAndLocation - true if Other is a compatible load or store
+  /// instruction reading from, or writing to, the same memory location.
+  bool MatchInstructionAndLocation(const LoadStoreSiteBase &Other) const {
+    return getPointerOperand() == Other.getPointerOperand() &&
+           isCompatible(Other.getInstruction());
+  }
+};
+
+class LoadSite : public LoadStoreSiteBase<LoadSite, Instruction, Value,
+                                          LoadInst, IntrinsicInst> {
+  typedef LoadStoreSiteBase<LoadSite, Instruction, Value, LoadInst,
+                            IntrinsicInst> Base;
+  friend class LoadStoreSiteBase<LoadSite, Instruction, Value, LoadInst,
+                                 IntrinsicInst>;
+  static bool isTargetIntrinsicSupported(const IntrinsicInst *II,
+                                         const TargetTransformInfo *TTI) {
+    return TTI->isTargetIntrinsicLikeLoad(II);
+  }
+
+public:
+  LoadSite(Value *V, const TargetTransformInfo &TTI) : Base(V, TTI) {}
+  LoadSite &operator=(Instruction *Inst) {
+    Base::operator=(Inst);
+    return *this;
+  }
+};
+
+class StoreSite : public LoadStoreSiteBase<StoreSite, Instruction, Value,
+                                           StoreInst, IntrinsicInst> {
+  typedef LoadStoreSiteBase<StoreSite, Instruction, Value, StoreInst,
+                            IntrinsicInst> Base;
+  friend class LoadStoreSiteBase<StoreSite, Instruction, Value, StoreInst,
+                                 IntrinsicInst>;
+  static bool isTargetIntrinsicSupported(const IntrinsicInst *II,
+                                         const TargetTransformInfo *TTI) {
+    return TTI->isTargetIntrinsicLikeStore(II);
+  }
+
+public:
+  StoreSite(Value *V, const TargetTransformInfo &TTI) : Base(V, TTI) {}
+  StoreSite &operator=(Instruction *Inst) {
+    Base::operator=(Inst);
+    return *this;
+  }
+};
+}
+#endif
Index: lib/Analysis/TargetTransformInfo.cpp
===================================================================
--- lib/Analysis/TargetTransformInfo.cpp
+++ lib/Analysis/TargetTransformInfo.cpp
@@ -261,14 +261,44 @@
   return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
 }
 
-bool TargetTransformInfo::getTgtMemIntrinsic(IntrinsicInst *Inst,
-                                             MemIntrinsicInfo &Info) const {
-  return TTIImpl->getTgtMemIntrinsic(Inst, Info);
+bool TargetTransformInfo::isTargetIntrinsicLikeLoad(
+    const IntrinsicInst *II) const {
+  return TTIImpl->isTargetIntrinsicLikeLoad(II);
 }
 
-Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
-    IntrinsicInst *Inst, Type *ExpectedType) const {
-  return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
+bool TargetTransformInfo::isTargetIntrinsicLikeStore(
+    const IntrinsicInst *II) const {
+  return TTIImpl->isTargetIntrinsicLikeStore(II);
+}
+
+bool TargetTransformInfo::isTargetIntrinsicAtomic(
+    const IntrinsicInst *II) const {
+  return TTIImpl->isTargetIntrinsicAtomic(II);
+}
+
+bool TargetTransformInfo::isTargetIntrinsicVolatile(
+    const IntrinsicInst *II) const {
+  return TTIImpl->isTargetIntrinsicVolatile(II);
+}
+
+Value *TargetTransformInfo::getOrCreateResultFromTargetIntrinsic(
+    IntrinsicInst *II, const Type *ExpectedType) const {
+  return TTIImpl->getOrCreateResultFromTargetIntrinsic(II, ExpectedType);
+}
+
+Value *TargetTransformInfo::getTargetIntrinsicPointerOperand(
+    const IntrinsicInst *II) const {
+  return TTIImpl->getTargetIntrinsicPointerOperand(II);
+}
+
+unsigned TargetTransformInfo::getTargetIntrinsicMatchingId(
+    const IntrinsicInst *II) const {
+  return TTIImpl->getTargetIntrinsicMatchingId(II);
+}
+
+bool TargetTransformInfo::mayTargetIntrinsicReadFromMemory(
+    const IntrinsicInst *II) const {
+  return TTIImpl->mayTargetIntrinsicReadFromMemory(II);
 }
 
 TargetTransformInfo::Concept::~Concept() {}
 
Index: lib/Target/AArch64/AArch64TargetTransformInfo.h
===================================================================
--- lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -134,11 +134,15 @@
 
   void getUnrollingPreferences(Loop *L, TTI::UnrollingPreferences &UP);
 
-  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
-                                           Type *ExpectedType);
-
-  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);
-
+  Value *getTargetIntrinsicPointerOperand(const IntrinsicInst *II);
+  bool isTargetIntrinsicLikeLoad(const IntrinsicInst *II);
+  bool isTargetIntrinsicLikeStore(const IntrinsicInst *II);
+  bool isTargetIntrinsicVolatile(const IntrinsicInst *II);
+  bool isTargetIntrinsicAtomic(const IntrinsicInst *II);
+  Value *getOrCreateResultFromTargetIntrinsic(IntrinsicInst *Inst,
+                                              const Type *ExpectedType);
+  unsigned getTargetIntrinsicMatchingId(const IntrinsicInst *II);
+  bool mayTargetIntrinsicReadFromMemory(const IntrinsicInst *II);
   /// @}
 };
 
Index: lib/Target/AArch64/AArch64TargetTransformInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -440,8 +440,41 @@
   UP.PartialOptSizeThreshold = 0;
 }
 
-Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
-                                                         Type *ExpectedType) {
+bool AArch64TTIImpl::isTargetIntrinsicLikeLoad(const IntrinsicInst *Inst) {
+  switch (Inst->getIntrinsicID()) {
+  default:
+    return false;
+  case Intrinsic::aarch64_neon_ld2:
+  case Intrinsic::aarch64_neon_ld3:
+  case Intrinsic::aarch64_neon_ld4:
+    return true;
+  }
+}
+
+bool AArch64TTIImpl::isTargetIntrinsicLikeStore(const IntrinsicInst *Inst) {
+  switch (Inst->getIntrinsicID()) {
+  default:
+    return false;
+  case Intrinsic::aarch64_neon_st2:
+  case Intrinsic::aarch64_neon_st3:
+  case Intrinsic::aarch64_neon_st4:
+    return true;
+  }
+}
+
+// AArch64 load/store intrinsics are not atomic.
+bool AArch64TTIImpl::isTargetIntrinsicAtomic(const IntrinsicInst *Inst) {
+  return false;
+}
+
+// AArch64 load/store intrinsics are not volatile.
+bool AArch64TTIImpl::isTargetIntrinsicVolatile(const IntrinsicInst *Inst) {
+  return false;
+}
+
+Value *
+AArch64TTIImpl::getOrCreateResultFromTargetIntrinsic(IntrinsicInst *Inst,
+                                                     const Type *ExpectedType) {
   switch (Inst->getIntrinsicID()) {
   default:
     return nullptr;
@@ -449,7 +482,7 @@
   case Intrinsic::aarch64_neon_st3:
   case Intrinsic::aarch64_neon_st4: {
     // Create a struct type
-    StructType *ST = dyn_cast<StructType>(ExpectedType);
+    const StructType *ST = dyn_cast<StructType>(ExpectedType);
     if (!ST)
       return nullptr;
     unsigned NumElts = Inst->getNumArgOperands() - 1;
@@ -459,7 +492,7 @@
       if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
         return nullptr;
     }
-    Value *Res = UndefValue::get(ExpectedType);
+    Value *Res = UndefValue::get(const_cast<Type *>(ExpectedType));
     IRBuilder<> Builder(Inst);
     for (unsigned i = 0, e = NumElts; i != e; ++i) {
       Value *L = Inst->getArgOperand(i);
@@ -476,46 +509,38 @@
   }
 }
 
-bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
-                                        MemIntrinsicInfo &Info) {
-  switch (Inst->getIntrinsicID()) {
-  default:
-    break;
-  case Intrinsic::aarch64_neon_ld2:
-  case Intrinsic::aarch64_neon_ld3:
-  case Intrinsic::aarch64_neon_ld4:
-    Info.ReadMem = true;
-    Info.WriteMem = false;
-    Info.Vol = false;
-    Info.NumMemRefs = 1;
-    Info.PtrVal = Inst->getArgOperand(0);
-    break;
-  case Intrinsic::aarch64_neon_st2:
-  case Intrinsic::aarch64_neon_st3:
-  case Intrinsic::aarch64_neon_st4:
-    Info.ReadMem = false;
-    Info.WriteMem = true;
-    Info.Vol = false;
-    Info.NumMemRefs = 1;
-    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
-    break;
-  }
+Value *
+AArch64TTIImpl::getTargetIntrinsicPointerOperand(const IntrinsicInst *Inst) {
+  if (isTargetIntrinsicLikeLoad(Inst))
+    return Inst->getArgOperand(0);
+  assert(isTargetIntrinsicLikeStore(Inst) &&
+         "Expecting a target store intrinsic.");
+  return Inst->getArgOperand(Inst->getNumArgOperands() - 1);
+}
+unsigned
+AArch64TTIImpl::getTargetIntrinsicMatchingId(const IntrinsicInst *Inst) {
 
   switch (Inst->getIntrinsicID()) {
   default:
-    return false;
+    assert(false &&
+           "Calling AArch64TTIImpl::getTargetIntrinsicMatchingId on an "
+           "unsupported intrinsic.");
   case Intrinsic::aarch64_neon_ld2:
   case Intrinsic::aarch64_neon_st2:
-    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
-    break;
+    return VECTOR_LDST_TWO_ELEMENTS;
   case Intrinsic::aarch64_neon_ld3:
   case Intrinsic::aarch64_neon_st3:
-    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
-    break;
+    return VECTOR_LDST_THREE_ELEMENTS;
   case Intrinsic::aarch64_neon_ld4:
   case Intrinsic::aarch64_neon_st4:
-    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
-    break;
+    return VECTOR_LDST_FOUR_ELEMENTS;
   }
-  return true;
+}
+
+bool AArch64TTIImpl::mayTargetIntrinsicReadFromMemory(
+    const IntrinsicInst *Inst) {
+  if (isTargetIntrinsicLikeLoad(Inst))
+    return true;
+  assert(isTargetIntrinsicLikeStore(Inst));
+  return false;
 }
 
Index: lib/Transforms/Scalar/EarlyCSE.cpp
===================================================================
--- lib/Transforms/Scalar/EarlyCSE.cpp
+++ lib/Transforms/Scalar/EarlyCSE.cpp
@@ -23,6 +23,7 @@
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LoadStoreSite.h"
 #include "llvm/IR/PatternMatch.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/Debug.h"
@@ -290,9 +291,9 @@
   /// CSE loads with other loads that have no intervening store.
   typedef RecyclingAllocator<
       BumpPtrAllocator,
-      ScopedHashTableVal<Value *, std::pair<Value *, unsigned>>>
+      ScopedHashTableVal<Value *, std::pair<Instruction *, unsigned>>>
       LoadMapAllocator;
-  typedef ScopedHashTable<Value *, std::pair<Value *, unsigned>,
+  typedef ScopedHashTable<Value *, std::pair<Instruction *, unsigned>,
                           DenseMapInfo<Value *>, LoadMapAllocator> LoadHTType;
   LoadHTType AvailableLoads;
 
@@ -376,64 +377,6 @@
     bool Processed;
   };
 
-  /// \brief Wrapper class to handle memory instructions, including loads,
-  /// stores and intrinsic loads and stores defined by the target.
-  class ParseMemoryInst {
-  public:
-    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
-        : Load(false), Store(false), Vol(false), MayReadFromMemory(false),
-          MayWriteToMemory(false), MatchingId(-1), Ptr(nullptr) {
-      MayReadFromMemory = Inst->mayReadFromMemory();
-      MayWriteToMemory = Inst->mayWriteToMemory();
-      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
-        MemIntrinsicInfo Info;
-        if (!TTI.getTgtMemIntrinsic(II, Info))
-          return;
-        if (Info.NumMemRefs == 1) {
-          Store = Info.WriteMem;
-          Load = Info.ReadMem;
-          MatchingId = Info.MatchingId;
-          MayReadFromMemory = Info.ReadMem;
-          MayWriteToMemory = Info.WriteMem;
-          Vol = Info.Vol;
-          Ptr = Info.PtrVal;
-        }
-      } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
-        Load = true;
-        Vol = !LI->isSimple();
-        Ptr = LI->getPointerOperand();
-      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
-        Store = true;
-        Vol = !SI->isSimple();
-        Ptr = SI->getPointerOperand();
-      }
-    }
-    bool isLoad() { return Load; }
-    bool isStore() { return Store; }
-    bool isVolatile() { return Vol; }
-    bool isMatchingMemLoc(const ParseMemoryInst &Inst) {
-      return Ptr == Inst.Ptr && MatchingId == Inst.MatchingId;
-    }
-    bool isValid() { return Ptr != nullptr; }
-    int getMatchingId() { return MatchingId; }
-    Value *getPtr() { return Ptr; }
-    bool mayReadFromMemory() { return MayReadFromMemory; }
-    bool mayWriteToMemory() { return MayWriteToMemory; }
-
-  private:
-    bool Load;
-    bool Store;
-    bool Vol;
-    bool MayReadFromMemory;
-    bool MayWriteToMemory;
-    // For regular (non-intrinsic) loads/stores, this is set to -1. For
-    // intrinsic loads/stores, the id is retrieved from the corresponding
-    // field in the MemIntrinsicInfo structure. That field contains
-    // non-negative values only.
-    int MatchingId;
-    Value *Ptr;
-  };
-
   bool processNode(DomTreeNode *Node);
 
   Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
@@ -442,8 +385,8 @@
     else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
       return SI->getValueOperand();
     assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
-    return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
-                                                 ExpectedType);
+    return TTI.getOrCreateResultFromTargetIntrinsic(cast<IntrinsicInst>(Inst),
+                                                    ExpectedType);
   }
 };
 }
 
@@ -464,7 +407,7 @@
   /// as long as there in no instruction that reads memory.  If we see a store
   /// to the same location, we delete the dead store.  This zaps trivial dead
   /// stores which can occur in bitfield code among other things.
-  Instruction *LastStore = nullptr;
+  StoreSite LastStore(nullptr, TTI);
   bool Changed = false;
 
   const DataLayout &DL = BB->getModule()->getDataLayout();
@@ -520,11 +463,10 @@
       continue;
     }
 
-    ParseMemoryInst MemInst(Inst, TTI);
     // If this is a non-volatile load, process it.
-    if (MemInst.isValid() && MemInst.isLoad()) {
+    if (LoadSite LS = LoadSite(Inst, TTI)) {
       // Ignore volatile loads.
-      if (MemInst.isVolatile()) {
+      if (!LS.isSimple()) {
         LastStore = nullptr;
         // Don't CSE across synchronization boundaries.
         if (Inst->mayWriteToMemory())
@@ -535,8 +477,9 @@
       // If we have an available version of this load, and if it is the right
      // generation, replace this instruction.
       std::pair<Value *, unsigned> InVal =
-          AvailableLoads.lookup(MemInst.getPtr());
-      if (InVal.first != nullptr && InVal.second == CurrentGeneration) {
+          AvailableLoads.lookup(LS.getPointerOperand());
+      if (InVal.first != nullptr && InVal.second == CurrentGeneration &&
+          LS.isCompatible(InVal.first)) {
         Value *Op = getOrCreateResult(InVal.first, Inst->getType());
         if (Op != nullptr) {
           DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
@@ -551,8 +494,9 @@
       }
 
       // Otherwise, remember that we have this instruction.
-      AvailableLoads.insert(MemInst.getPtr(), std::pair<Value *, unsigned>(
-                                                  Inst, CurrentGeneration));
+      AvailableLoads.insert(
+          LS.getPointerOperand(),
+          std::pair<Instruction *, unsigned>(Inst, CurrentGeneration));
       LastStore = nullptr;
       continue;
     }
@@ -562,8 +506,8 @@
     // memory.  The target may override this (e.g. so that a store intrinsic
     // does not read from memory, and thus will be treated the same as a
     // regular store for commoning purposes).
-    if (Inst->mayReadFromMemory() &&
-        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
+    StoreSite SS(Inst, TTI);
+    if (Inst->mayReadFromMemory() && !(SS && !SS.mayReadFromMemory()))
       LastStore = nullptr;
 
     // If this is a read-only call, process it.
@@ -594,14 +538,14 @@
     if (Inst->mayWriteToMemory()) {
       ++CurrentGeneration;
 
-      if (MemInst.isValid() && MemInst.isStore()) {
+      if (SS) {
         // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads.  Delete the earlier store.
        if (LastStore) {
-          ParseMemoryInst LastStoreMemInst(LastStore, TTI);
-          if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
-            DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
-                         << " due to: " << *Inst << '\n');
+          if (LastStore.MatchInstructionAndLocation(SS)) {
+            DEBUG(dbgs() << "EarlyCSE DEAD STORE: "
+                         << *LastStore.getInstruction() << " due to: " << *Inst
+                         << '\n');
             LastStore->eraseFromParent();
             Changed = true;
             ++NumDSE;
@@ -615,11 +559,12 @@
         // version of the pointer.  It is safe to forward from volatile stores
        // to non-volatile loads, so we don't have to check for volatility of
        // the store.
-        AvailableLoads.insert(MemInst.getPtr(), std::pair<Value *, unsigned>(
-                                                    Inst, CurrentGeneration));
+        AvailableLoads.insert(SS.getPointerOperand(),
+                              std::pair<Instruction *, unsigned>(
+                                  SS.getInstruction(), CurrentGeneration));
 
         // Remember that this was the last store we saw for DSE.
-        if (!MemInst.isVolatile())
+        if (SS.isSimple())
           LastStore = Inst;
       }
     }
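
A minimal usage sketch of the new interface, assuming the LoadSite/StoreSite API introduced above; the helper name collectSimpleLoadPointers and its surrounding setup are illustrative only, while the calls into LoadSite mirror how EarlyCSE consumes the abstraction in this patch:

  #include "llvm/ADT/SmallVector.h"
  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/LoadStoreSite.h"

  using namespace llvm;

  // Collect the pointer operands of all simple (non-volatile, non-atomic)
  // loads in a block, whether they are plain LoadInsts or target load
  // intrinsics such as aarch64.neon.ld2/ld3/ld4.
  static void collectSimpleLoadPointers(BasicBlock &BB,
                                        const TargetTransformInfo &TTI,
                                        SmallVectorImpl<const Value *> &Ptrs) {
    for (Instruction &I : BB) {
      // LoadSite converts to false when I is neither a load nor a load-like
      // target intrinsic supported by the target's TTI hooks.
      if (LoadSite LS = LoadSite(&I, TTI)) {
        if (!LS.isSimple())
          continue; // Skip volatile/atomic accesses.
        Ptrs.push_back(LS.getPointerOperand());
      }
    }
  }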