Index: llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
===================================================================
--- llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
+++ llvm/lib/CodeGen/AssignmentTrackingAnalysis.cpp
@@ -31,6 +31,10 @@
     MaxNumBlocks("debug-ata-max-blocks", cl::init(10000),
                  cl::desc("Maximum num basic blocks before debug info dropped"),
                  cl::Hidden);
+/// Option for debugging the pass, determines if the memory location fragment
+/// filling happens after generating the variable locations.
+static cl::opt<bool> EnableMemLocFragFill("mem-loc-frag-fill", cl::init(true),
+                                          cl::Hidden);
 /// Print the results of the analysis. Respects -filter-print-funcs.
 static cl::opt<bool> PrintResults("print-debug-ata", cl::init(false),
                                   cl::Hidden);
@@ -214,11 +218,625 @@
   return End;
 }
 
+/// Extract the offset used in \p DIExpr. Returns None if the expression
+/// doesn't explicitly describe a memory location with DW_OP_deref or if the
+/// expression is too complex to interpret.
+static Optional<int64_t> getDerefOffsetInBytes(const DIExpression *DIExpr) {
+  int64_t Offset = 0;
+  const unsigned NumElements = DIExpr->getNumElements();
+  const auto Elements = DIExpr->getElements();
+  unsigned NextElement = 0;
+  // Extract the offset.
+  if (NumElements > 2 && Elements[0] == dwarf::DW_OP_plus_uconst) {
+    Offset = Elements[1];
+    NextElement = 2;
+  } else if (NumElements > 3 && Elements[0] == dwarf::DW_OP_constu) {
+    NextElement = 3;
+    if (Elements[2] == dwarf::DW_OP_plus)
+      Offset = Elements[1];
+    else if (Elements[2] == dwarf::DW_OP_minus)
+      Offset = -Elements[1];
+    else
+      return None;
+  }
+
+  // If that's all there is it means there's no deref.
+  if (NextElement >= NumElements)
+    return None;
+
+  // Check this ends in deref or deref then fragment.
+  if (Elements[NextElement] == dwarf::DW_OP_deref) {
+    if (NumElements == NextElement + 1)
+      return Offset; // Ends with deref.
+    else if (NumElements == NextElement + 3 &&
+             Elements[NextElement + 1] == dwarf::DW_OP_LLVM_fragment)
+      return Offset; // Ends with deref + fragment.
+  }
+
+  // Don't bother trying to interpret anything more complex.
+  return None;
+}
+
 /// A whole (unfragmented) source variable.
 using DebugAggregate = std::pair<const DILocalVariable *, const DILocation *>;
 static DebugAggregate getAggregate(const DbgVariableIntrinsic *DII) {
   return DebugAggregate(DII->getVariable(), DII->getDebugLoc().getInlinedAt());
 }
+static DebugAggregate getAggregate(const DebugVariable &Var) {
+  return DebugAggregate(Var.getVariable(), Var.getInlinedAt());
+}
+
+/// In dwarf emission, the following sequence
+///    1. dbg.value ... Fragment(0, 64)
+///    2. dbg.value ... Fragment(0, 32)
+/// effectively sets Fragment(32, 32) to undef (each def sets all bits not in
+/// the intersection of the fragments to having "no location"). This makes
+/// sense for implicit location values because splitting the computed values
+/// could be troublesome, and is probably quite uncommon.
+/// When we convert dbg.assigns to dbg.value+deref this kind of thing is
+/// common, and describing a location (memory) rather than a value means we
+/// don't need to worry about splitting any values.
+/// This class performs a(nother) dataflow analysis over the function, adding
+/// variable locations so that any bits of a variable with a memory location
+/// have that location explicitly reinstated at each subsequent variable
+/// location definition that doesn't overwrite those bits, i.e.
+/// after a variable location def, insert new defs for the memory location
+/// with fragments for the difference of "all bits currently in memory" and
+/// "the fragment of the second def".
+class MemLocFragmentFill {
+  Function &Fn;
+  FunctionVarLocsBuilder *FnVarLocs;
+  const DenseSet<DebugAggregate> *VarsWithStackSlot;
+
+  // 0 = no memory location.
+  using BaseAddress = unsigned;
+  using OffsetInBitsTy = unsigned;
+  using FragTraits = IntervalMapHalfOpenInfo<OffsetInBitsTy>;
+  using FragsInMemMap = IntervalMap<
+      OffsetInBitsTy, BaseAddress,
+      IntervalMapImpl::NodeSizer<OffsetInBitsTy, BaseAddress>::LeafSize,
+      FragTraits>;
+  FragsInMemMap::Allocator IntervalMapAlloc;
+  using Map = DenseMap<unsigned /*Var*/, FragsInMemMap>;
+
+  // IDs for memory location base addresses in maps. Use 0 to indicate that
+  // there's no memory location.
+  UniqueVector<Value *> Bases;
+  UniqueVector<DebugAggregate> Aggregates;
+  DenseMap<const BasicBlock *, Map> LiveIn;
+  DenseMap<const BasicBlock *, Map> LiveOut;
+
+  struct FragMemLoc {
+    unsigned Var;
+    unsigned Base;
+    unsigned OffsetInBits;
+    unsigned SizeInBits;
+    DebugLoc DL;
+  };
+  using InsertMap = MapVector<Instruction *, SmallVector<FragMemLoc>>;
+  DenseMap<const BasicBlock *, InsertMap> BBInsertBeforeMap;
+
+  static bool equ(const FragsInMemMap &A, const FragsInMemMap &B) {
+    auto AIt = A.begin(), AEnd = A.end();
+    auto BIt = B.begin(), BEnd = B.end();
+    for (; AIt != AEnd; ++AIt, ++BIt) {
+      if (BIt == BEnd)
+        return false; // B has fewer elements than A.
+      if (AIt.start() != BIt.start() || AIt.stop() != BIt.stop())
+        return false; // Interval is different.
+      if (AIt.value() != BIt.value())
+        return false; // Value at interval is different.
+    }
+    // AIt == AEnd. Check BIt is also now at end.
+    return BIt == BEnd;
+  }
+
+  static bool equ(const Map &A, const Map &B) {
+    if (A.size() != B.size())
+      return false;
+    for (const auto &APair : A) {
+      auto BIt = B.find(APair.first);
+      if (BIt == B.end())
+        return false;
+      if (!equ(APair.second, BIt->second))
+        return false;
+    }
+    return true;
+  }
+
+  /// Return a string for the value that \p BaseID represents.
+  std::string toString(unsigned BaseID) {
+    if (BaseID)
+      return Bases[BaseID]->getName().str();
+    else
+      return "None";
+  }
+
+  /// Format string describing a FragsInMemMap (IntervalMap) interval.
+  std::string toString(FragsInMemMap::const_iterator It, bool Newline = true) {
+    std::string String;
+    std::stringstream S(String);
+    if (It.valid()) {
+      S << "[" << It.start() << ", " << It.stop()
+        << "): " << toString(It.value());
+    } else {
+      S << "invalid iterator (end)";
+    }
+    if (Newline)
+      S << "\n";
+    return S.str();
+  };
+
+  FragsInMemMap meetFragments(const FragsInMemMap &A, const FragsInMemMap &B) {
+    FragsInMemMap Result(IntervalMapAlloc);
+    for (auto AIt = A.begin(), AEnd = A.end(); AIt != AEnd; ++AIt) {
+      LLVM_DEBUG(dbgs() << "a " << toString(AIt));
+      // This is basically copied from process() and inverted (process is
+      // performing something like a union whereas this is more of an
+      // intersect).
+
+      // There's no work to do if interval `a` overlaps no fragments in map `B`.
+      if (!B.overlaps(AIt.start(), AIt.stop()))
+        continue;
+
+      // Does StartBit intersect an existing fragment?
+      auto FirstOverlap = B.find(AIt.start());
+      assert(FirstOverlap != B.end());
+      bool IntersectStart = FirstOverlap.start() < AIt.start();
+      LLVM_DEBUG(dbgs() << "- FirstOverlap " << toString(FirstOverlap, false)
+                        << ", IntersectStart: " << IntersectStart << "\n");
+
+      // Does EndBit intersect an existing fragment?
+      auto LastOverlap = B.find(AIt.stop());
+      bool IntersectEnd =
+          LastOverlap != B.end() && LastOverlap.start() < AIt.stop();
+      LLVM_DEBUG(dbgs() << "- LastOverlap " << toString(LastOverlap, false)
+                        << ", IntersectEnd: " << IntersectEnd << "\n");
+
+      // Check if both ends of `a` intersect the same interval `b`.
+      if (IntersectStart && IntersectEnd && FirstOverlap == LastOverlap) {
+        // Insert `a` (`a` is contained in `b`) if the values match.
+        //    [ a ]
+        // [ - b - ]
+        // -
+        //    [ r ]
+        LLVM_DEBUG(dbgs() << "- a is contained within "
+                          << toString(FirstOverlap));
+        if (AIt.value() && AIt.value() == FirstOverlap.value())
+          Result.insert(AIt.start(), AIt.stop(), AIt.value());
+      } else {
+        // There's an overlap but `a` is not fully contained within
+        // `b`. Shorten any end-point intersections.
+        //    [ - a - ]
+        // [ - b - ]
+        // -
+        //    [ r ]
+        auto Next = FirstOverlap;
+        if (IntersectStart) {
+          LLVM_DEBUG(dbgs() << "- insert intersection of a and "
+                            << toString(FirstOverlap));
+          if (AIt.value() && AIt.value() == FirstOverlap.value())
+            Result.insert(AIt.start(), FirstOverlap.stop(), AIt.value());
+          ++Next;
+        }
+        // [ - a - ]
+        //    [ - b - ]
+        // -
+        //    [ r ]
+        if (IntersectEnd) {
+          LLVM_DEBUG(dbgs() << "- insert intersection of a and "
+                            << toString(LastOverlap));
+          if (AIt.value() && AIt.value() == LastOverlap.value())
+            Result.insert(LastOverlap.start(), AIt.stop(), AIt.value());
+        }
+
+        // Insert all intervals in map `B` that are contained within interval
+        // `a` where the values match.
+        // [ -  - a -  - ]
+        // [ b1 ]   [ b2 ]
+        // -
+        // [ r1 ]   [ r2 ]
+        while (Next != B.end() && Next.start() < AIt.stop() &&
+               Next.stop() <= AIt.stop()) {
+          LLVM_DEBUG(dbgs() << "- insert intersection of a and "
+                            << toString(Next));
+          if (AIt.value() && AIt.value() == Next.value())
+            Result.insert(Next.start(), Next.stop(), Next.value());
+          ++Next;
+        }
+      }
+    }
+    return Result;
+  }
+
+  void meetVars(Map &A, Map &B) {
+    // Meet A and B.
+    //
+    // Result = meet(a, b) for a in A, b in B where Var(a) == Var(b)
+    for (auto It = A.begin(), End = A.end(); It != End; ++It) {
+      unsigned AVar = It->first;
+      FragsInMemMap &AFrags = It->second;
+      auto BIt = B.find(AVar);
+      if (BIt == B.end()) {
+        A.erase(It);
+        continue; // Var has no bits defined in B.
+      }
+      LLVM_DEBUG(dbgs() << "meet fragment maps for "
+                        << Aggregates[AVar].first->getName() << "\n");
+      AFrags = meetFragments(AFrags, BIt->second);
+    }
+  }
+
+  bool meet(const BasicBlock &BB,
+            const SmallPtrSet<BasicBlock *, 16> &Visited) {
+    LLVM_DEBUG(dbgs() << "meet block info from preds of " << BB.getName()
+                      << "\n");
+
+    Map BBLiveIn;
+    bool FirstMeet = true;
+    // LiveIn locs for BB is the meet of the already-processed preds' LiveOut
+    // locs.
+    for (auto I = pred_begin(&BB), E = pred_end(&BB); I != E; I++) {
+      // Ignore preds that haven't been processed yet. This is essentially the
+      // same as initialising all variables to implicit top value (⊤) which is
+      // the identity value for the meet operation.
+      const BasicBlock *Pred = *I;
+      if (!Visited.count(Pred))
+        continue;
+
+      auto PredLiveOut = LiveOut.find(Pred);
+      assert(PredLiveOut != LiveOut.end());
+
+      if (FirstMeet) {
+        LLVM_DEBUG(dbgs() << "BBLiveIn = " << Pred->getName() << "\n");
+        BBLiveIn = PredLiveOut->second;
+        FirstMeet = false;
+      } else {
+        LLVM_DEBUG(dbgs() << "BBLiveIn = meet BBLiveIn, " << Pred->getName()
+                          << "\n");
+        meetVars(BBLiveIn, PredLiveOut->second);
+      }
+
+      // An empty set is ⊥ for the intersect-like meet operation. If we've
+      // already got ⊥ there's no need to run the code - we know the result is
+      // ⊥ since `meet(a, ⊥) = ⊥`.
+      if (BBLiveIn.size() == 0)
+        break;
+    }
+
+    auto CurrentLiveInEntry = LiveIn.find(&BB);
+    // If there's no LiveIn entry for the block yet, add it.
+    if (CurrentLiveInEntry == LiveIn.end()) {
+      LLVM_DEBUG(dbgs() << "change=true (first) on meet on " << BB.getName()
+                        << "\n");
+      LiveIn[&BB] = std::move(BBLiveIn);
+      return /*Changed=*/true;
+    }
+
+    // If the LiveIn set has changed (expensive check) update it and return
+    // true.
+    if (!equ(BBLiveIn, CurrentLiveInEntry->second)) {
+      LLVM_DEBUG(dbgs() << "change=true on meet on " << BB.getName() << "\n");
+      CurrentLiveInEntry->second = std::move(BBLiveIn);
+      return /*Changed=*/true;
+    }
+
+    LLVM_DEBUG(dbgs() << "change=false on meet on " << BB.getName() << "\n");
+    return /*Changed=*/false;
+  }
+
+  void insertMemLoc(BasicBlock &BB, Instruction &Before, unsigned Var,
+                    unsigned StartBit, unsigned EndBit, unsigned Base,
+                    DebugLoc DL) {
+    if (!Base)
+      return;
+    FragMemLoc Loc;
+    Loc.Var = Var;
+    Loc.OffsetInBits = StartBit;
+    Loc.SizeInBits = EndBit - StartBit;
+    assert(Base && "Expected a non-zero ID for Base address");
+    Loc.Base = Base;
+    Loc.DL = DL;
+    BBInsertBeforeMap[&BB][&Before].push_back(Loc);
+    LLVM_DEBUG(dbgs() << "Add mem def for " << Aggregates[Var].first->getName()
+                      << " bits [" << StartBit << ", " << EndBit << ")\n");
+  }
+
+  void addDef(VarLocInfo VarLoc, Instruction &After, BasicBlock &BB,
+              Map &LiveSet) {
+    DebugVariable DbgVar = FnVarLocs->getVariable(VarLoc.VariableID);
+    if (skipVariable(DbgVar.getVariable()))
+      return;
+    // Don't bother doing anything for this variable if we know it's fully
+    // promoted. We're only interested in variables that (sometimes) live on
+    // the stack here.
+    if (!VarsWithStackSlot->count(getAggregate(DbgVar)))
+      return;
+    unsigned Var = Aggregates.insert(
+        DebugAggregate(DbgVar.getVariable(), VarLoc.DL.getInlinedAt()));
+
+    // [StartBit: EndBit) are the bits affected by this def.
+    const DIExpression *DIExpr = VarLoc.Expr;
+    unsigned StartBit;
+    unsigned EndBit;
+    if (auto Frag = DIExpr->getFragmentInfo()) {
+      StartBit = Frag->OffsetInBits;
+      EndBit = StartBit + Frag->SizeInBits;
+    } else {
+      assert(static_cast<bool>(DbgVar.getVariable()->getSizeInBits()));
+      StartBit = 0;
+      EndBit = *DbgVar.getVariable()->getSizeInBits();
+    }
+
+    // We will only fill fragments for simple memory-describing dbg.value
+    // intrinsics. If the fragment offset is the same as the offset from the
+    // base pointer, do The Thing, otherwise fall back to normal dbg.value
+    // behaviour. AssignmentTrackingLowering has generated DIExpressions
+    // written in terms of the base pointer.
+    // TODO: Remove this condition since the fragment offset doesn't always
+    // equal the offset from base pointer (e.g. for a SROA-split variable).
+    const auto DerefOffsetInBytes = getDerefOffsetInBytes(DIExpr);
+    const unsigned Base =
+        DerefOffsetInBytes && *DerefOffsetInBytes * 8 == StartBit
+            ? Bases.insert(VarLoc.V)
+            : 0;
+    LLVM_DEBUG(dbgs() << "DEF " << DbgVar.getVariable()->getName() << " ["
+                      << StartBit << ", " << EndBit << "): " << toString(Base)
+                      << "\n");
+
+    // First of all, any locs that use mem that are disrupted need reinstating.
+    // Unfortunately, IntervalMap doesn't let us insert intervals that overlap
+    // with existing intervals so this code involves a lot of fiddling around
+    // with intervals to do that manually.
+    auto FragIt = LiveSet.find(Var);
+    if (FragIt != LiveSet.end()) {
+      FragsInMemMap &FragMap = FragIt->second;
+      // First check the easy case: the new fragment `f` doesn't overlap with
+      // any intervals.
+      if (!FragMap.overlaps(StartBit, EndBit)) {
+        LLVM_DEBUG(dbgs() << "- No overlaps\n");
+        FragMap.insert(StartBit, EndBit, Base);
+      } else {
+        // There is at least one overlap.
+
+        // Does StartBit intersect an existing fragment?
+        auto FirstOverlap = FragMap.find(StartBit);
+        assert(FirstOverlap != FragMap.end());
+        bool IntersectStart = FirstOverlap.start() < StartBit;
+
+        // Does EndBit intersect an existing fragment?
+        auto LastOverlap = FragMap.find(EndBit);
+        bool IntersectEnd = LastOverlap.valid() && LastOverlap.start() < EndBit;
+
+        // Check if both ends of `f` intersect the same interval `i`.
+        if (IntersectStart && IntersectEnd && FirstOverlap == LastOverlap) {
+          LLVM_DEBUG(dbgs() << "- Intersect single interval @ both ends\n");
+          // Shorten `i` so that there's space to insert `f`.
+          //    [ f ]
+          // [ - i - ]
+          // +
+          // [ f ][ i ]
+          auto EndBitOfOverlap = FirstOverlap.stop();
+          FirstOverlap.setStop(StartBit);
+          insertMemLoc(BB, After, Var, FirstOverlap.start(), StartBit,
+                       FirstOverlap.value(), VarLoc.DL);
+
+          // Insert a new interval to represent the end part.
+          FragMap.insert(EndBit, EndBitOfOverlap, FirstOverlap.value());
+          insertMemLoc(BB, After, Var, EndBit, EndBitOfOverlap,
+                       FirstOverlap.value(), VarLoc.DL);
+
+          // Insert the new (middle) fragment now there is space.
+          FragMap.insert(StartBit, EndBit, Base);
+        } else {
+          // There's an overlap but `f` is not fully contained within
+          // `i`. Shorten any end-point intersections so that we can
+          // then insert `f`.
+          //    [ - f - ]
+          // [ - i - ]
+          // |   |
+          // [ i ]
+          // Shorten any end-point intersections.
+          if (IntersectStart) {
+            LLVM_DEBUG(dbgs() << "- Intersect interval at start\n");
+            // Split off at the intersection.
+            FirstOverlap.setStop(StartBit);
+            insertMemLoc(BB, After, Var, FirstOverlap.start(), StartBit,
+                         FirstOverlap.value(), VarLoc.DL);
+          }
+          // [ - f - ]
+          //    [ - i - ]
+          //        |   |
+          //        [ i ]
+          if (IntersectEnd) {
+            LLVM_DEBUG(dbgs() << "- Intersect interval at end\n");
+            // Split off at the intersection.
+            LastOverlap.setStart(EndBit);
+            insertMemLoc(BB, After, Var, EndBit, LastOverlap.stop(),
+                         LastOverlap.value(), VarLoc.DL);
+          }
+
+          LLVM_DEBUG(dbgs() << "- Erase intervals contained within\n");
+          // FirstOverlap and LastOverlap have been shortened such that they're
+          // no longer overlapping with [StartBit, EndBit). Delete any overlaps
+          // that remain (these will be fully contained within `f`).
+          //    [ - f - ]         }
+          // [ - i - ]            } Intersection shortening that has happened
+          // |   |                } above.
+          // [ i ]                }
+          // -----------------
+          //    [i2 ]             } Intervals fully contained within `f` get
+          //                      } erased.
+          // -----------------
+          //    [ - f - ][ i ]    } Completed insertion.
+          auto It = FirstOverlap;
+          if (IntersectStart)
+            ++It; // IntersectStart: first overlap has been shortened.
+          while (It.valid() && It.start() >= StartBit && It.stop() <= EndBit) {
+            LLVM_DEBUG(dbgs() << "- Erase " << toString(It));
+            It.erase(); // This increments It after removing the interval.
+          }
+          // We've dealt with all the overlaps now!
+          assert(!FragMap.overlaps(StartBit, EndBit));
+          LLVM_DEBUG(dbgs() << "- Insert DEF into now-empty space\n");
+          FragMap.insert(StartBit, EndBit, Base);
+        }
+      }
+    } else {
+      // Add this variable to the BB map.
+      auto P = LiveSet.try_emplace(Var, FragsInMemMap(IntervalMapAlloc));
+      assert(P.second && "Var already in map?");
+      // Add the interval to the fragment map.
+      P.first->second.insert(StartBit, EndBit, Base);
+    }
+  }
+
+  bool skipVariable(const DILocalVariable *V) { return !V->getSizeInBits(); }
+
+  void process(BasicBlock &BB, Map &LiveSet) {
+    BBInsertBeforeMap[&BB].clear();
+    for (auto &I : BB) {
+      if (const auto *Locs = FnVarLocs->getWedge(&I)) {
+        for (const VarLocInfo &Loc : *Locs) {
+          addDef(Loc, I, *I.getParent(), LiveSet);
+        }
+      }
+    }
+  }
+
+public:
+  MemLocFragmentFill(Function &Fn,
+                     const DenseSet<DebugAggregate> *VarsWithStackSlot)
+      : Fn(Fn), VarsWithStackSlot(VarsWithStackSlot) {}
+
+  /// Add variable locations to \p FnVarLocs so that any bits of a variable
+  /// with a memory location have that location explicitly reinstated at each
+  /// subsequent variable location definition that doesn't overwrite those
+  /// bits. i.e. after a variable location def, insert new defs for the memory
+  /// location with fragments for the difference of "all bits currently in
+  /// memory" and "the fragment of the second def". e.g.
+  ///
+  ///     Before:
+  ///
+  ///     var x bits 0 to 63:  value in memory
+  ///     more instructions
+  ///     var x bits 0 to 31:  value is %0
+  ///
+  ///     After:
+  ///
+  ///     var x bits 0 to 63:  value in memory
+  ///     more instructions
+  ///     var x bits 0 to 31:  value is %0
+  ///     var x bits 32 to 63: value in memory ; <-- new loc def
+  ///
+  void run(FunctionVarLocsBuilder *FnVarLocs) {
+    if (!EnableMemLocFragFill)
+      return;
+
+    this->FnVarLocs = FnVarLocs;
+
+    // Prepare for traversal.
+    //
+    ReversePostOrderTraversal<Function *> RPOT(&Fn);
+    std::priority_queue<unsigned int, std::vector<unsigned int>,
+                        std::greater<unsigned int>>
+        Worklist;
+    std::priority_queue<unsigned int, std::vector<unsigned int>,
+                        std::greater<unsigned int>>
+        Pending;
+    DenseMap<unsigned int, BasicBlock *> OrderToBB;
+    DenseMap<BasicBlock *, unsigned int> BBToOrder;
+    { // Init OrderToBB and BBToOrder.
+      unsigned int RPONumber = 0;
+      for (auto RI = RPOT.begin(), RE = RPOT.end(); RI != RE; ++RI) {
+        OrderToBB[RPONumber] = *RI;
+        BBToOrder[*RI] = RPONumber;
+        Worklist.push(RPONumber);
+        ++RPONumber;
+      }
+      LiveIn.init(RPONumber);
+      LiveOut.init(RPONumber);
+    }
+
+    // Perform the traversal.
+    //
+    // This is a standard "intersect of predecessor outs" dataflow problem. To
+    // solve it, we perform meet() and process() using the two worklist method
+    // until the LiveIn data for each block becomes unchanging.
+    //
+    // This dataflow is essentially working on maps of sets and at each meet we
+    // intersect the maps and the mapped sets. So, initialized live-in maps
+    // monotonically decrease in value throughout the dataflow.
+    SmallPtrSet<BasicBlock *, 16> Visited;
+    while (!Worklist.empty() || !Pending.empty()) {
+      // We track what is on the pending worklist to avoid inserting the same
+      // thing twice. We could avoid this with a custom priority queue, but
+      // this is probably not worth it.
+      SmallPtrSet<BasicBlock *, 16> OnPending;
+      LLVM_DEBUG(dbgs() << "Processing Worklist\n");
+      while (!Worklist.empty()) {
+        BasicBlock *BB = OrderToBB[Worklist.top()];
+        LLVM_DEBUG(dbgs() << "\nPop BB " << BB->getName() << "\n");
+        Worklist.pop();
+        bool InChanged = meet(*BB, Visited);
+        // Always consider LiveIn changed on the first visit.
+        InChanged |= Visited.insert(BB).second;
+        if (InChanged) {
+          LLVM_DEBUG(dbgs()
+                     << BB->getName() << " has new InLocs, process it\n");
+          // Mutate a copy of LiveIn while processing BB. Once we've processed
+          // the terminator LiveSet is the LiveOut set for BB.
+          // This is an expensive copy!
+          Map LiveSet = LiveIn[BB];
+
+          // Process the instructions in the block.
+          process(*BB, LiveSet);
+
+          // Relatively expensive check: has anything changed in LiveOut for
+          // BB?
+          if (!equ(LiveOut[BB], LiveSet)) {
+            LLVM_DEBUG(dbgs() << BB->getName()
+                              << " has new OutLocs, add succs to worklist: [ ");
+            LiveOut[BB] = std::move(LiveSet);
+            for (auto I = succ_begin(BB), E = succ_end(BB); I != E; I++) {
+              if (OnPending.insert(*I).second) {
+                LLVM_DEBUG(dbgs() << I->getName() << " ");
+                Pending.push(BBToOrder[*I]);
+              }
+            }
+            LLVM_DEBUG(dbgs() << "]\n");
+          }
+        }
+      }
+      Worklist.swap(Pending);
+      // At this point, pending must be empty, since it was just the empty
+      // worklist.
+      assert(Pending.empty() && "Pending should be empty");
+    }
+
+    // Insert new location defs.
+    for (auto &Pair : BBInsertBeforeMap) {
+      InsertMap &Map = Pair.second;
+      for (auto &Pair : Map) {
+        Instruction *InsertBefore = Pair.first;
+        assert(InsertBefore && "should never be null");
+        auto FragMemLocs = Pair.second;
+        auto &Ctx = Fn.getContext();
+
+        for (auto FragMemLoc : FragMemLocs) {
+          DIExpression *Expr = DIExpression::get(Ctx, None);
+          Expr = *DIExpression::createFragmentExpression(
+              Expr, FragMemLoc.OffsetInBits, FragMemLoc.SizeInBits);
+          Expr = DIExpression::prepend(Expr, DIExpression::DerefAfter,
+                                       FragMemLoc.OffsetInBits / 8);
+          DebugVariable Var(Aggregates[FragMemLoc.Var].first, Expr,
+                            FragMemLoc.DL.getInlinedAt());
+          FnVarLocs->addVarLoc(InsertBefore, Var, Expr, FragMemLoc.DL,
+                               Bases[FragMemLoc.Base]);
+        }
+      }
+    }
+  }
+};
 
 /// AssignmentTrackingLowering encapsulates a dataflow analysis over a function
 /// that interprets assignment tracking debug info metadata and stores in IR to
@@ -1573,10 +2191,17 @@
   bool Changed = false;
 
-  AssignmentTrackingLowering Pass(Fn, Layout, &VarsWithStackSlot);
-  Changed = Pass.run(FnVarLocs);
+  // Use a scope block to clean up AssignmentTrackingLowering before running
+  // MemLocFragmentFill to reduce peak memory consumption.
+  {
+    AssignmentTrackingLowering Pass(Fn, Layout, &VarsWithStackSlot);
+    Changed = Pass.run(FnVarLocs);
+  }
 
   if (Changed) {
+    MemLocFragmentFill Pass(Fn, &VarsWithStackSlot);
+    Pass.run(FnVarLocs);
+
     // Remove redundant entries. As well as reducing memory consumption and
     // avoiding waiting cycles later by burning some now, this has another
     // important job. That is to work around some SelectionDAG quirks. See
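
Note (not part of the patch): the interval juggling in MemLocFragmentFill::addDef
above exists because llvm::IntervalMap refuses to insert an interval that
overlaps an existing one. The following minimal standalone sketch shows the
same split-and-reinsert pattern on a map shaped like FragsInMemMap; the bit
ranges and base IDs are made up purely for illustration.

    #include "llvm/ADT/IntervalMap.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    int main() {
      // Half-open bit intervals mapping to a base-address ID (0 = no location),
      // mirroring the FragsInMemMap type used by the patch.
      using FragMap =
          IntervalMap<unsigned, unsigned, 16, IntervalMapHalfOpenInfo<unsigned>>;
      FragMap::Allocator Alloc;
      FragMap Frags(Alloc);

      // Bits [0, 64) of a variable currently live at base address ID 1.
      Frags.insert(0, 64, 1);

      // A new def arrives for bits [16, 32). IntervalMap cannot insert an
      // overlapping interval, so shorten the existing one, re-insert its tail,
      // then insert the middle -- the same dance addDef performs.
      auto It = Frags.find(16);           // The overlapping interval [0, 64).
      unsigned OldStop = It.stop();       // 64.
      unsigned OldBase = It.value();      // 1.
      It.setStop(16);                     // Now [0, 16): 1.
      Frags.insert(32, OldStop, OldBase); // Tail [32, 64): 1.
      Frags.insert(16, 32, /*Base=*/2);   // New fragment [16, 32): 2.

      for (auto I = Frags.begin(), E = Frags.end(); I != E; ++I)
        outs() << "[" << I.start() << ", " << I.stop() << "): " << I.value()
               << "\n";
      // Prints: [0, 16): 1 then [16, 32): 2 then [32, 64): 1.
      return 0;
    }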
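
Also not part of the patch: a hedged illustration of which expressions the
getDerefOffsetInBytes helper above accepts. The helper name derefOffsetExamples
and the way the expressions are built here are illustrative only; the expected
results in the comments are read off the logic in the patch.

    #include "llvm/BinaryFormat/Dwarf.h"
    #include "llvm/IR/DebugInfoMetadata.h"
    #include "llvm/IR/LLVMContext.h"

    static void derefOffsetExamples(llvm::LLVMContext &Ctx) {
      using namespace llvm;
      // {DW_OP_deref}                                -> offset 0.
      uint64_t Deref[] = {dwarf::DW_OP_deref};
      (void)DIExpression::get(Ctx, Deref);

      // {DW_OP_plus_uconst, 16, DW_OP_deref}         -> offset 16.
      uint64_t PlusDeref[] = {dwarf::DW_OP_plus_uconst, 16, dwarf::DW_OP_deref};
      (void)DIExpression::get(Ctx, PlusDeref);

      // {DW_OP_constu, 8, DW_OP_minus, DW_OP_deref}  -> offset -8.
      uint64_t MinusDeref[] = {dwarf::DW_OP_constu, 8, dwarf::DW_OP_minus,
                               dwarf::DW_OP_deref};
      (void)DIExpression::get(Ctx, MinusDeref);

      // {DW_OP_plus_uconst, 16} (no trailing deref)  -> None; the expression
      // does not describe a memory location, so no fragment filling happens.
      uint64_t NoDeref[] = {dwarf::DW_OP_plus_uconst, 16};
      (void)DIExpression::get(Ctx, NoDeref);
    }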