Changeset View
Changeset View
Standalone View
Standalone View
llvm/lib/Transforms/Scalar/GVN.cpp
Show First 20 Lines • Show All 176 Lines • ▼ Show 20 Lines | |||||
/// Materialization of an AvailableValue never fails. An AvailableValue is | /// Materialization of an AvailableValue never fails. An AvailableValue is | ||||
/// implicitly associated with a rematerialization point which is the | /// implicitly associated with a rematerialization point which is the | ||||
/// location of the instruction from which it was formed. | /// location of the instruction from which it was formed. | ||||
struct llvm::gvn::AvailableValue { | struct llvm::gvn::AvailableValue { | ||||
enum ValType { | enum ValType { | ||||
SimpleVal, // A simple offsetted value that is accessed. | SimpleVal, // A simple offsetted value that is accessed. | ||||
LoadVal, // A value produced by a load. | LoadVal, // A value produced by a load. | ||||
MemIntrin, // A memory intrinsic which is loaded from. | MemIntrin, // A memory intrinsic which is loaded from. | ||||
UndefVal // A UndefValue representing a value from dead block (which | UndefVal, // A UndefValue representing a value from dead block (which | ||||
// is not yet physically removed from the CFG). | // is not yet physically removed from the CFG). | ||||
SelectVal, // A pointer select which is loaded from and for which the load | |||||
// can be replaced by a value select. | |||||
}; | }; | ||||
/// V - The value that is live out of the block. | /// V - The value that is live out of the block. | ||||
PointerIntPair<Value *, 2, ValType> Val; | PointerIntPair<Value *, 3, ValType> Val; | ||||
/// Offset - The byte offset in Val that is interesting for the load query. | /// Offset - The byte offset in Val that is interesting for the load query. | ||||
unsigned Offset = 0; | unsigned Offset = 0; | ||||
static AvailableValue get(Value *V, unsigned Offset = 0) { | static AvailableValue get(Value *V, unsigned Offset = 0) { | ||||
AvailableValue Res; | AvailableValue Res; | ||||
Res.Val.setPointer(V); | Res.Val.setPointer(V); | ||||
Res.Val.setInt(SimpleVal); | Res.Val.setInt(SimpleVal); | ||||
Show All 20 Lines | struct llvm::gvn::AvailableValue { | ||||
static AvailableValue getUndef() { | static AvailableValue getUndef() { | ||||
AvailableValue Res; | AvailableValue Res; | ||||
Res.Val.setPointer(nullptr); | Res.Val.setPointer(nullptr); | ||||
Res.Val.setInt(UndefVal); | Res.Val.setInt(UndefVal); | ||||
Res.Offset = 0; | Res.Offset = 0; | ||||
return Res; | return Res; | ||||
} | } | ||||
static AvailableValue getSelect(SelectInst *Sel) { | |||||
AvailableValue Res; | |||||
Res.Val.setPointer(Sel); | |||||
Res.Val.setInt(SelectVal); | |||||
Res.Offset = 0; | |||||
return Res; | |||||
} | |||||
bool isSimpleValue() const { return Val.getInt() == SimpleVal; } | bool isSimpleValue() const { return Val.getInt() == SimpleVal; } | ||||
bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; } | bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; } | ||||
bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; } | bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; } | ||||
bool isUndefValue() const { return Val.getInt() == UndefVal; } | bool isUndefValue() const { return Val.getInt() == UndefVal; } | ||||
bool isSelectValue() const { return Val.getInt() == SelectVal; } | |||||
Value *getSimpleValue() const { | Value *getSimpleValue() const { | ||||
assert(isSimpleValue() && "Wrong accessor"); | assert(isSimpleValue() && "Wrong accessor"); | ||||
return Val.getPointer(); | return Val.getPointer(); | ||||
} | } | ||||
LoadInst *getCoercedLoadValue() const { | LoadInst *getCoercedLoadValue() const { | ||||
assert(isCoercedLoadValue() && "Wrong accessor"); | assert(isCoercedLoadValue() && "Wrong accessor"); | ||||
return cast<LoadInst>(Val.getPointer()); | return cast<LoadInst>(Val.getPointer()); | ||||
} | } | ||||
MemIntrinsic *getMemIntrinValue() const { | MemIntrinsic *getMemIntrinValue() const { | ||||
assert(isMemIntrinValue() && "Wrong accessor"); | assert(isMemIntrinValue() && "Wrong accessor"); | ||||
return cast<MemIntrinsic>(Val.getPointer()); | return cast<MemIntrinsic>(Val.getPointer()); | ||||
} | } | ||||
SelectInst *getSelectValue() const { | |||||
assert(isSelectValue() && "Wrong accessor"); | |||||
return cast<SelectInst>(Val.getPointer()); | |||||
} | |||||
/// Emit code at the specified insertion point to adjust the value defined | /// Emit code at the specified insertion point to adjust the value defined | ||||
/// here to the specified type. This handles various coercion cases. | /// here to the specified type. This handles various coercion cases. | ||||
Value *MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt, | Value *MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt, | ||||
GVNPass &gvn) const; | GVNPass &gvn) const; | ||||
}; | }; | ||||
/// Represents an AvailableValue which can be rematerialized at the end of | /// Represents an AvailableValue which can be rematerialized at the end of | ||||
/// the associated BasicBlock. | /// the associated BasicBlock. | ||||
Show All 15 Lines | static AvailableValueInBlock get(BasicBlock *BB, Value *V, | ||||
unsigned Offset = 0) { | unsigned Offset = 0) { | ||||
return get(BB, AvailableValue::get(V, Offset)); | return get(BB, AvailableValue::get(V, Offset)); | ||||
} | } | ||||
static AvailableValueInBlock getUndef(BasicBlock *BB) { | static AvailableValueInBlock getUndef(BasicBlock *BB) { | ||||
return get(BB, AvailableValue::getUndef()); | return get(BB, AvailableValue::getUndef()); | ||||
} | } | ||||
static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel) { | |||||
return get(BB, AvailableValue::getSelect(Sel)); | |||||
} | |||||
/// Emit code at the end of this block to adjust the value defined here to | /// Emit code at the end of this block to adjust the value defined here to | ||||
/// the specified type. This handles various coercion cases. | /// the specified type. This handles various coercion cases. | ||||
Value *MaterializeAdjustedValue(LoadInst *Load, GVNPass &gvn) const { | Value *MaterializeAdjustedValue(LoadInst *Load, GVNPass &gvn) const { | ||||
return AV.MaterializeAdjustedValue(Load, BB->getTerminator(), gvn); | return AV.MaterializeAdjustedValue(Load, BB->getTerminator(), gvn); | ||||
} | } | ||||
}; | }; | ||||
//===----------------------------------------------------------------------===// | //===----------------------------------------------------------------------===// | ||||
▲ Show 20 Lines • Show All 606 Lines • ▼ Show 20 Lines | for (const AvailableValueInBlock &AV : ValuesPerBlock) { | ||||
SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(Load, gvn)); | SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(Load, gvn)); | ||||
} | } | ||||
// Perform PHI construction. | // Perform PHI construction. | ||||
return SSAUpdate.GetValueInMiddleOfBlock(Load->getParent()); | return SSAUpdate.GetValueInMiddleOfBlock(Load->getParent()); | ||||
} | } | ||||
/// Try to find a load of \p Ptr that dominates the select \p Sel within the
/// select's basic block. Returns nullptr if no such load exists.
///
/// Only simple (non-volatile, non-atomic) loads are accepted: the returned
/// load's value is later reused to build a value select replacing a load of
/// \p Sel, which is only sound for ordinary load semantics.
static LoadInst *findDominatingLoad(Value *Ptr, SelectInst *Sel,
                                    DominatorTree &DT) {
  for (Value *U : Ptr->users()) {
    auto *LI = dyn_cast<LoadInst>(U);
    // A LoadInst user of Ptr necessarily loads from Ptr (the pointer is the
    // load's only operand). Restrict to simple loads in the same block that
    // execute before the select.
    if (LI && LI->isSimple() && LI->getParent() == Sel->getParent() &&
        DT.dominates(LI, Sel))
      return LI;
  }
  return nullptr;
}
Value *AvailableValue::MaterializeAdjustedValue(LoadInst *Load, | Value *AvailableValue::MaterializeAdjustedValue(LoadInst *Load, | ||||
Instruction *InsertPt, | Instruction *InsertPt, | ||||
GVNPass &gvn) const { | GVNPass &gvn) const { | ||||
Value *Res; | Value *Res; | ||||
Type *LoadTy = Load->getType(); | Type *LoadTy = Load->getType(); | ||||
const DataLayout &DL = Load->getModule()->getDataLayout(); | const DataLayout &DL = Load->getModule()->getDataLayout(); | ||||
if (isSimpleValue()) { | if (isSimpleValue()) { | ||||
Res = getSimpleValue(); | Res = getSimpleValue(); | ||||
Show All 24 Lines | if (isSimpleValue()) { | ||||
} | } | ||||
} else if (isMemIntrinValue()) { | } else if (isMemIntrinValue()) { | ||||
Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy, | Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy, | ||||
InsertPt, DL); | InsertPt, DL); | ||||
LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset | LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset | ||||
<< " " << *getMemIntrinValue() << '\n' | << " " << *getMemIntrinValue() << '\n' | ||||
<< *Res << '\n' | << *Res << '\n' | ||||
<< "\n\n\n"); | << "\n\n\n"); | ||||
} else if (isSelectValue()) { | |||||
// Introduce a new value select for a load from an eligible pointer select. | |||||
SelectInst *Sel = getSelectValue(); | |||||
LoadInst *L1 = | |||||
findDominatingLoad(Sel->getOperand(1), Sel, gvn.getDominatorTree()); | |||||
LoadInst *L2 = | |||||
findDominatingLoad(Sel->getOperand(2), Sel, gvn.getDominatorTree()); | |||||
assert(L1 && L2 && | |||||
reames: I wasn't sure GVN already relied on comesBefore caching and thus was initial concerned this… | |||||
"must be able to obtain dominating loads for both value operands of " | |||||
"the select"); | |||||
Res = SelectInst::Create(Sel->getCondition(), L1, L2, "", Sel); | |||||
} else { | } else { | ||||
llvm_unreachable("Should not materialize value from dead block"); | llvm_unreachable("Should not materialize value from dead block"); | ||||
} | } | ||||
assert(Res && "failed to materialize?"); | assert(Res && "failed to materialize?"); | ||||
return Res; | return Res; | ||||
} | } | ||||
static bool isLifetimeStart(const Instruction *Inst) { | static bool isLifetimeStart(const Instruction *Inst) { | ||||
Remove the extra def? reames: Remove the extra def? | |||||
if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst)) | if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst)) | ||||
return II->getIntrinsicID() == Intrinsic::lifetime_start; | return II->getIntrinsicID() == Intrinsic::lifetime_start; | ||||
return false; | return false; | ||||
} | } | ||||
/// Assuming To can be reached from both From and Between, does Between lie on | /// Assuming To can be reached from both From and Between, does Between lie on | ||||
/// every path from From to To? | /// every path from From to To? | ||||
static bool liesBetween(const Instruction *From, Instruction *Between, | static bool liesBetween(const Instruction *From, Instruction *Between, | ||||
▲ Show 20 Lines • Show All 62 Lines • ▼ Show 20 Lines | static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo, | ||||
if (OtherAccess) | if (OtherAccess) | ||||
R << " in favor of " << NV("OtherAccess", OtherAccess); | R << " in favor of " << NV("OtherAccess", OtherAccess); | ||||
R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst()); | R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst()); | ||||
ORE->emit(R); | ORE->emit(R); | ||||
} | } | ||||
/// Check if a load from pointer-select \p Address in \p DepBB can be converted
/// to a value select. The following conditions need to be satisfied:
/// 1. The pointer select (\p Address) must be defined in \p DepBB.
/// 2. Both value operands of the pointer select must be loaded in the same
///    basic block, before the pointer select, by simple loads of the same
///    type (the materialized replacement is a value select of those loads,
///    which requires matching operand types to be valid IR).
/// 3. There must be no instructions between the found loads and \p End that
///    may clobber the loads.
static Optional<AvailableValue>
tryToConvertLoadOfPtrSelect(BasicBlock *DepBB, BasicBlock::iterator End,
                            Value *Address, DominatorTree &DT, AAResults *AA) {
  auto *Sel = dyn_cast_or_null<SelectInst>(Address);
  if (!Sel || DepBB != Sel->getParent())
    return None;

  LoadInst *L1 = findDominatingLoad(Sel->getOperand(1), Sel, DT);
  LoadInst *L2 = findDominatingLoad(Sel->getOperand(2), Sel, DT);
  if (!L1 || !L2)
    return None;

  // The replacement built in MaterializeAdjustedValue is
  // select(Cond, L1, L2); both loads must be simple and produce the same
  // type, otherwise the value select would be invalid IR.
  // NOTE(review): the caller must additionally ensure this type matches the
  // type of the load being eliminated; consider threading the query load's
  // type through here -- TODO confirm against callers.
  if (!L1->isSimple() || !L2->isSimple() || L1->getType() != L2->getType())
    return None;

  // Ensure there are no accesses that may modify the locations referenced by
  // either L1 or L2 between L1, L2 and the specified End iterator. Scanning
  // from the earlier of the two loads also covers the range between them.
  Instruction *EarlierLoad = L1->comesBefore(L2) ? L1 : L2;
  MemoryLocation L1Loc = MemoryLocation::get(L1);
  MemoryLocation L2Loc = MemoryLocation::get(L2);
  if (any_of(make_range(EarlierLoad->getIterator(), End), [&](Instruction &I) {
        return isModSet(AA->getModRefInfo(&I, L1Loc)) ||
               isModSet(AA->getModRefInfo(&I, L2Loc));
      }))
    return None;

  return AvailableValue::getSelect(Sel);
}
bool GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo, | bool GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo, | ||||
Value *Address, AvailableValue &Res) { | Value *Address, AvailableValue &Res) { | ||||
if (!DepInfo.isDef() && !DepInfo.isClobber()) { | |||||
assert(isa<SelectInst>(Address)); | |||||
if (auto R = tryToConvertLoadOfPtrSelect( | |||||
Load->getParent(), Load->getIterator(), Address, getDominatorTree(), | |||||
getAliasAnalysis())) { | |||||
Res = *R; | |||||
return true; | |||||
} | |||||
return false; | |||||
} | |||||
assert((DepInfo.isDef() || DepInfo.isClobber()) && | assert((DepInfo.isDef() || DepInfo.isClobber()) && | ||||
"expected a local dependence"); | "expected a local dependence"); | ||||
assert(Load->isUnordered() && "rules below are incorrect for ordered access"); | assert(Load->isUnordered() && "rules below are incorrect for ordered access"); | ||||
const DataLayout &DL = Load->getModule()->getDataLayout(); | const DataLayout &DL = Load->getModule()->getDataLayout(); | ||||
Instruction *DepInst = DepInfo.getInst(); | Instruction *DepInst = DepInfo.getInst(); | ||||
if (DepInfo.isClobber()) { | if (DepInfo.isClobber()) { | ||||
▲ Show 20 Lines • Show All 51 Lines • ▼ Show 20 Lines | if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) { | ||||
int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address, | int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address, | ||||
DepMI, DL); | DepMI, DL); | ||||
if (Offset != -1) { | if (Offset != -1) { | ||||
Res = AvailableValue::getMI(DepMI, Offset); | Res = AvailableValue::getMI(DepMI, Offset); | ||||
return true; | return true; | ||||
} | } | ||||
} | } | ||||
} | } | ||||
// Nothing known about this clobber, have to be conservative | // Nothing known about this clobber, have to be conservative | ||||
LLVM_DEBUG( | LLVM_DEBUG( | ||||
// fast print dep, using operator<< on instruction is too slow. | // fast print dep, using operator<< on instruction is too slow. | ||||
dbgs() << "GVN: load "; Load->printAsOperand(dbgs()); | dbgs() << "GVN: load "; Load->printAsOperand(dbgs()); | ||||
dbgs() << " is clobbered by " << *DepInst << '\n';); | dbgs() << " is clobbered by " << *DepInst << '\n';); | ||||
if (ORE->allowExtraAnalysis(DEBUG_TYPE)) | if (ORE->allowExtraAnalysis(DEBUG_TYPE)) | ||||
reportMayClobberedLoad(Load, DepInfo, DT, ORE); | reportMayClobberedLoad(Load, DepInfo, DT, ORE); | ||||
▲ Show 20 Lines • Show All 63 Lines • ▼ Show 20 Lines | void GVNPass::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps, | ||||
// that could potentially clobber the load). | // that could potentially clobber the load). | ||||
unsigned NumDeps = Deps.size(); | unsigned NumDeps = Deps.size(); | ||||
for (unsigned i = 0, e = NumDeps; i != e; ++i) { | for (unsigned i = 0, e = NumDeps; i != e; ++i) { | ||||
BasicBlock *DepBB = Deps[i].getBB(); | BasicBlock *DepBB = Deps[i].getBB(); | ||||
MemDepResult DepInfo = Deps[i].getResult(); | MemDepResult DepInfo = Deps[i].getResult(); | ||||
if (DeadBlocks.count(DepBB)) { | if (DeadBlocks.count(DepBB)) { | ||||
// Dead dependent mem-op disguise as a load evaluating the same value | // Dead dependent mem-op disguise as a load evaluating the same value | ||||
// as the load in question. | // as the load in question. | ||||
Repeated code, pull out a static function. reames: Repeated code, pull out a static function. | |||||
ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB)); | ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB)); | ||||
continue; | continue; | ||||
} | } | ||||
if (!DepInfo.isDef() && !DepInfo.isClobber()) { | |||||
UnavailableBlocks.push_back(DepBB); | |||||
continue; | |||||
} | |||||
// The address being loaded in this non-local block may not be the same as | // The address being loaded in this non-local block may not be the same as | ||||
// the pointer operand of the load if PHI translation occurs. Make sure | // the pointer operand of the load if PHI translation occurs. Make sure | ||||
// to consider the right address. | // to consider the right address. | ||||
Value *Address = Deps[i].getAddress(); | Value *Address = Deps[i].getAddress(); | ||||
if (!DepInfo.isDef() && !DepInfo.isClobber()) { | |||||
if (auto R = tryToConvertLoadOfPtrSelect(DepBB, DepBB->end(), Address, | |||||
getDominatorTree(), | |||||
You appear to only be handling the case where the select and it's loads are in a different block from the load being removed. Please add the corresponding case for block local code as well. reames: You appear to only be handling the case where the select and it's loads are in a different… | |||||
getAliasAnalysis())) { | |||||
ValuesPerBlock.push_back( | |||||
Unless I'm really missing something obvious, you've got a serious omission here. The memory dependence walk already done tells us there's no clobber between the select and the using loads. I do not see anything in the added code which checks for a clobber between select and the loads above it. L1 = load A1 reames: Unless I'm really missing something obvious, you've got a serious omission here.
The memory… | |||||
AvailableValueInBlock::get(DepBB, std::move(*R))); | |||||
continue; | |||||
} | |||||
UnavailableBlocks.push_back(DepBB); | |||||
continue; | |||||
} | |||||
AvailableValue AV; | AvailableValue AV; | ||||
if (AnalyzeLoadAvailability(Load, DepInfo, Address, AV)) { | if (AnalyzeLoadAvailability(Load, DepInfo, Address, AV)) { | ||||
// subtlety: because we know this was a non-local dependency, we know | // subtlety: because we know this was a non-local dependency, we know | ||||
// it's safe to materialize anywhere between the instruction within | // it's safe to materialize anywhere between the instruction within | ||||
// DepInfo and the end of it's block. | // DepInfo and the end of it's block. | ||||
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, | ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, | ||||
std::move(AV))); | std::move(AV))); | ||||
} else { | } else { | ||||
▲ Show 20 Lines • Show All 430 Lines • ▼ Show 20 Lines | bool GVNPass::processNonLocalLoad(LoadInst *Load) { | ||||
// it will be too expensive. | // it will be too expensive. | ||||
unsigned NumDeps = Deps.size(); | unsigned NumDeps = Deps.size(); | ||||
if (NumDeps > MaxNumDeps) | if (NumDeps > MaxNumDeps) | ||||
return false; | return false; | ||||
// If we had a phi translation failure, we'll have a single entry which is a | // If we had a phi translation failure, we'll have a single entry which is a | ||||
// clobber in the current block. Reject this early. | // clobber in the current block. Reject this early. | ||||
if (NumDeps == 1 && | if (NumDeps == 1 && | ||||
!Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) { | !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) { | ||||
chillUnsubmitted Not Done ReplyInline ActionsShouldn't we allow the function to continue if we came here with a SelectInst chill: Shouldn't we allow the function to continue if we came here with a `SelectInst`
in the dep… | |||||
LLVM_DEBUG(dbgs() << "GVN: non-local load "; Load->printAsOperand(dbgs()); | LLVM_DEBUG(dbgs() << "GVN: non-local load "; Load->printAsOperand(dbgs()); | ||||
dbgs() << " has unknown dependencies\n";); | dbgs() << " has unknown dependencies\n";); | ||||
return false; | return false; | ||||
} | } | ||||
bool Changed = false; | bool Changed = false; | ||||
// If this load follows a GEP, see if we can PRE the indices before analyzing. | // If this load follows a GEP, see if we can PRE the indices before analyzing. | ||||
if (GetElementPtrInst *GEP = | if (GetElementPtrInst *GEP = | ||||
▲ Show 20 Lines • Show All 274 Lines • ▼ Show 20 Lines | bool GVNPass::processLoad(LoadInst *L) { | ||||
// ... to a pointer that has been loaded from before... | // ... to a pointer that has been loaded from before... | ||||
MemDepResult Dep = MD->getDependency(L); | MemDepResult Dep = MD->getDependency(L); | ||||
// If it is defined in another block, try harder. | // If it is defined in another block, try harder. | ||||
if (Dep.isNonLocal()) | if (Dep.isNonLocal()) | ||||
return processNonLocalLoad(L); | return processNonLocalLoad(L); | ||||
Value *Address = L->getPointerOperand(); | |||||
// Only handle the local case below | // Only handle the local case below | ||||
if (!Dep.isDef() && !Dep.isClobber()) { | if (!Dep.isDef() && !Dep.isClobber() && !isa<SelectInst>(Address)) { | ||||
// This might be a NonFuncLocal or an Unknown | // This might be a NonFuncLocal or an Unknown | ||||
LLVM_DEBUG( | LLVM_DEBUG( | ||||
// fast print dep, using operator<< on instruction is too slow. | // fast print dep, using operator<< on instruction is too slow. | ||||
dbgs() << "GVN: load "; L->printAsOperand(dbgs()); | dbgs() << "GVN: load "; L->printAsOperand(dbgs()); | ||||
dbgs() << " has unknown dependence\n";); | dbgs() << " has unknown dependence\n";); | ||||
return false; | return false; | ||||
} | } | ||||
AvailableValue AV; | AvailableValue AV; | ||||
if (AnalyzeLoadAvailability(L, Dep, L->getPointerOperand(), AV)) { | if (AnalyzeLoadAvailability(L, Dep, Address, AV)) { | ||||
Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this); | Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this); | ||||
// Replace the load! | // Replace the load! | ||||
patchAndReplaceAllUsesWith(L, AvailableValue); | patchAndReplaceAllUsesWith(L, AvailableValue); | ||||
markInstructionForDeletion(L); | markInstructionForDeletion(L); | ||||
if (MSSAU) | if (MSSAU) | ||||
MSSAU->removeMemoryAccess(L); | MSSAU->removeMemoryAccess(L); | ||||
++NumGVNLoad; | ++NumGVNLoad; | ||||
▲ Show 20 Lines • Show All 1,175 Lines • Show Last 20 Lines |
I wasn't sure GVN already relied on comesBefore caching and thus was initial concerned this might be a performance bottleneck, but it looks like there's a bunch of cases we already assume block order is up to date, so this should be a non-issue.