diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -697,29 +697,35 @@
   public:
     ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
       : Inst(Inst) {
-      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
+      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
         if (TTI.getTgtMemIntrinsic(II, Info))
-          IsTargetMemInst = true;
+          IntrID = II->getIntrinsicID();
+      }
     }
 
+    Instruction *get() { return Inst; }
+    const Instruction *get() const { return Inst; }
+
     bool isLoad() const {
-      if (IsTargetMemInst) return Info.ReadMem;
+      if (IntrID != 0)
+        return Info.ReadMem;
       return isa<LoadInst>(Inst);
     }
 
     bool isStore() const {
-      if (IsTargetMemInst) return Info.WriteMem;
+      if (IntrID != 0)
+        return Info.WriteMem;
       return isa<StoreInst>(Inst);
     }
 
     bool isAtomic() const {
-      if (IsTargetMemInst)
+      if (IntrID != 0)
        return Info.Ordering != AtomicOrdering::NotAtomic;
       return Inst->isAtomic();
     }
 
     bool isUnordered() const {
-      if (IsTargetMemInst)
+      if (IntrID != 0)
         return Info.isUnordered();
 
       if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
@@ -732,7 +738,7 @@
     }
 
     bool isVolatile() const {
-      if (IsTargetMemInst)
+      if (IntrID != 0)
         return Info.IsVolatile;
 
       if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
@@ -762,27 +768,31 @@
     // field in the MemIntrinsicInfo structure. That field contains
     // non-negative values only.
     int getMatchingId() const {
-      if (IsTargetMemInst) return Info.MatchingId;
+      if (IntrID != 0)
+        return Info.MatchingId;
       return -1;
     }
 
     Value *getPointerOperand() const {
-      if (IsTargetMemInst) return Info.PtrVal;
+      if (IntrID != 0)
+        return Info.PtrVal;
       return getLoadStorePointerOperand(Inst);
     }
 
     bool mayReadFromMemory() const {
-      if (IsTargetMemInst) return Info.ReadMem;
+      if (IntrID != 0)
+        return Info.ReadMem;
       return Inst->mayReadFromMemory();
     }
 
     bool mayWriteToMemory() const {
-      if (IsTargetMemInst) return Info.WriteMem;
+      if (IntrID != 0)
+        return Info.WriteMem;
       return Inst->mayWriteToMemory();
     }
 
   private:
-    bool IsTargetMemInst = false;
+    Intrinsic::ID IntrID = 0;
     MemIntrinsicInfo Info;
     Instruction *Inst;
   };
@@ -792,6 +802,9 @@
   bool handleBranchCondition(Instruction *CondInst, const BranchInst *BI,
                              const BasicBlock *BB, const BasicBlock *Pred);
 
+  Value *getMatchingValue(LoadValue &InVal, ParseMemoryInst &MemInst,
+                          unsigned CurrentGeneration, bool InValFirst);
+
   Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
     if (auto *LI = dyn_cast<LoadInst>(Inst))
       return LI;
@@ -954,6 +967,30 @@
   return MadeChanges;
 }
 
+Value *EarlyCSE::getMatchingValue(LoadValue &InVal, ParseMemoryInst &MemInst,
+                                  unsigned CurrentGeneration, bool InValFirst) {
+  // InValFirst is false for loads, true for stores.
+
+  if (InVal.DefInst == nullptr)
+    return nullptr;
+  if (InVal.MatchingId != MemInst.getMatchingId())
+    return nullptr;
+  // We don't yet handle removing loads with ordering of any kind.
+  if (MemInst.isVolatile() || !MemInst.isUnordered())
+    return nullptr;
+  // We can't replace an atomic load with one which isn't also atomic.
+  if (MemInst.isLoad() && !InVal.IsAtomic && MemInst.isAtomic())
+    return nullptr;
+  Instruction *Earlier = InValFirst ? InVal.DefInst : MemInst.get();
+  Instruction *Later = InValFirst ? MemInst.get() : InVal.DefInst;
+
+  if (!isOperatingOnInvariantMemAt(MemInst.get(), InVal.Generation) &&
+      !isSameMemGeneration(InVal.Generation, CurrentGeneration, InVal.DefInst,
+                           MemInst.get()))
+    return nullptr;
+  return getOrCreateResult(Later, Earlier->getType());
+}
+
 bool EarlyCSE::processNode(DomTreeNode *Node) {
   bool Changed = false;
   BasicBlock *BB = Node->getBlock();
@@ -1170,32 +1207,22 @@
       // we can assume the current load loads the same value as the dominating
       // load.
       LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
-      if (InVal.DefInst != nullptr &&
-          InVal.MatchingId == MemInst.getMatchingId() &&
-          // We don't yet handle removing loads with ordering of any kind.
-          !MemInst.isVolatile() && MemInst.isUnordered() &&
-          // We can't replace an atomic load with one which isn't also atomic.
-          InVal.IsAtomic >= MemInst.isAtomic() &&
-          (isOperatingOnInvariantMemAt(&Inst, InVal.Generation) ||
-           isSameMemGeneration(InVal.Generation, CurrentGeneration,
-                               InVal.DefInst, &Inst))) {
-        Value *Op = getOrCreateResult(InVal.DefInst, Inst.getType());
-        if (Op != nullptr) {
-          LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << Inst
-                            << "  to: " << *InVal.DefInst << '\n');
-          if (!DebugCounter::shouldExecute(CSECounter)) {
-            LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
-            continue;
-          }
-          if (!Inst.use_empty())
-            Inst.replaceAllUsesWith(Op);
-          salvageKnowledge(&Inst, &AC);
-          removeMSSA(Inst);
-          Inst.eraseFromParent();
-          Changed = true;
-          ++NumCSELoad;
+      if (Value *Op = getMatchingValue(InVal, MemInst, CurrentGeneration,
+                                       false)) {
+        LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << Inst
+                          << "  to: " << *InVal.DefInst << '\n');
+        if (!DebugCounter::shouldExecute(CSECounter)) {
+          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
           continue;
         }
+        if (!Inst.use_empty())
+          Inst.replaceAllUsesWith(Op);
+        salvageKnowledge(&Inst, &AC);
+        removeMSSA(Inst);
+        Inst.eraseFromParent();
+        Changed = true;
+        ++NumCSELoad;
+        continue;
       }
 
       // Otherwise, remember that we have this instruction.
@@ -1265,13 +1292,8 @@
       if (MemInst.isValid() && MemInst.isStore()) {
         LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
         if (InVal.DefInst &&
-            InVal.DefInst == getOrCreateResult(&Inst, InVal.DefInst->getType()) &&
-            InVal.MatchingId == MemInst.getMatchingId() &&
-            // We don't yet handle removing stores with ordering of any kind.
-            !MemInst.isVolatile() && MemInst.isUnordered() &&
-            (isOperatingOnInvariantMemAt(&Inst, InVal.Generation) ||
-             isSameMemGeneration(InVal.Generation, CurrentGeneration,
-                                 InVal.DefInst, &Inst))) {
+            InVal.DefInst == getMatchingValue(InVal, MemInst, CurrentGeneration,
+                                              true)) {
           // It is okay to have a LastStore to a different pointer here if MemorySSA
           // tells us that the load and store are from the same memory generation.
           // In that case, LastStore should keep its present value since we're
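
For reviewers who want the shape of the refactor in isolation: the sketch below is not part of the patch and does not use the LLVM API. It is a self-contained toy (hypothetical names: Instr, AvailableValue, and a simplified getMatchingValue signature) showing how a single helper with an InValFirst-style flag can replace the two near-duplicate predicate chains at the load-CSE and store-DSE call sites. As in the patch, the flag picks which of the two instructions supplies the reused value and which supplies the expected result type.

// Toy sketch only -- compiles standalone (C++14), names are illustrative.
#include <iostream>
#include <string>

struct Instr {
  std::string Name;
  bool Volatile = false;
  bool Atomic = false;
};

// Stand-in for EarlyCSE's LoadValue: the last load/store remembered for a
// pointer, plus the metadata the matching checks need.
struct AvailableValue {
  Instr *DefInst = nullptr;
  int MatchingId = -1;
  bool IsAtomic = false;
};

// Mirrors the bail-out structure of the patch's getMatchingValue(): each
// failed check is an early nullptr return; InValFirst selects which
// instruction plays the "Later" role whose value gets returned (the real
// code also takes the expected type from "Earlier" via getOrCreateResult).
Instr *getMatchingValue(const AvailableValue &InVal, Instr *Cur, int CurId,
                        bool IsLoad, bool InValFirst) {
  if (InVal.DefInst == nullptr)
    return nullptr;
  if (InVal.MatchingId != CurId)
    return nullptr;
  if (Cur->Volatile)            // no volatile/ordered accesses handled yet
    return nullptr;
  if (IsLoad && !InVal.IsAtomic && Cur->Atomic)
    return nullptr;             // can't feed a non-atomic value to an atomic load
  Instr *Earlier = InValFirst ? InVal.DefInst : Cur;
  Instr *Later = InValFirst ? Cur : InVal.DefInst;
  (void)Earlier;                // real code: getOrCreateResult(Later, Earlier->getType())
  return Later;
}

int main() {
  Instr PriorLoad{"prior-load"};
  AvailableValue InVal{&PriorLoad, /*MatchingId=*/-1, /*IsAtomic=*/false};

  // Load path (InValFirst == false): the remembered def is the value to reuse.
  Instr CurLoad{"cur-load"};
  if (Instr *Op = getMatchingValue(InVal, &CurLoad, -1, /*IsLoad=*/true,
                                   /*InValFirst=*/false))
    std::cout << "CSE load to: " << Op->Name << '\n';          // prior-load

  // Store path (InValFirst == true): the current store is the candidate; the
  // caller compares the result against InVal.DefInst to spot a dead store.
  Instr CurStore{"cur-store"};
  if (Instr *Op = getMatchingValue(InVal, &CurStore, -1, /*IsLoad=*/false,
                                   /*InValFirst=*/true))
    std::cout << "dead-store candidate: " << Op->Name << '\n'; // cur-store
}

The design point this illustrates is the same one the patch makes: with all matching checks in one helper, the load and store paths cannot drift apart, and the only asymmetry left at the call sites is the InValFirst argument and what each caller does with the returned value.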