Index: llvm/lib/CodeGen/CMakeLists.txt
===================================================================
--- llvm/lib/CodeGen/CMakeLists.txt
+++ llvm/lib/CodeGen/CMakeLists.txt
@@ -183,6 +183,7 @@
   LiveDebugValues/LiveDebugValues.cpp
   LiveDebugValues/VarLocBasedImpl.cpp
+  LiveDebugValues/InstrRefBasedImpl.cpp

   ADDITIONAL_HEADER_DIRS
   ${LLVM_MAIN_INCLUDE_DIR}/llvm/CodeGen
Index: llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
===================================================================
--- /dev/null
+++ llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp
@@ -0,0 +1,2969 @@
+//===- InstrRefBasedImpl.cpp - Tracking Debug Value MIs -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file InstrRefBasedImpl.cpp
+///
+/// This is a separate implementation of LiveDebugValues, see
+/// LiveDebugValues.cpp and VarLocBasedImpl.cpp for more information.
+///
+/// This pass propagates variable locations between basic blocks, resolving
+/// control flow conflicts between them. The problem is much like SSA
+/// construction, where each DBG_VALUE instruction assigns the *value* that
+/// a variable has, and every instruction where the variable is in scope uses
+/// that variable. The resulting map of instruction-to-value is then translated
+/// into a register (or spill) location for each variable over each instruction.
+///
+/// This pass determines which DBG_VALUE dominates which instructions, or if
+/// none do, where values must be merged (like PHI nodes). The added
+/// complication is that because codegen has already finished, a PHI node may
+/// be needed for a variable location to be correct, but no register or spill
+/// slot merges the necessary values. In these circumstances, the variable
+/// location is dropped.
+///
+/// What makes this analysis non-trivial is loops: we cannot tell in advance
+/// whether a variable location is live throughout a loop, or whether its
+/// location is clobbered (or redefined by another DBG_VALUE), without
+/// exploring all the way through.
+///
+/// To make this simpler we perform two kinds of analysis. First, we identify
+/// every value defined by every instruction (ignoring those that only move
+/// another value), then compute a map of which values are available for each
+/// instruction. This is stronger than a reaching-def analysis, as we create
+/// PHI values where other values merge.
+///
+/// Secondly, for each variable, we effectively reconstruct SSA using each
+/// DBG_VALUE as a def. The DBG_VALUEs read a value-number computed by the
+/// first analysis from the location they refer to. We can then compute the
+/// dominance frontiers of where a variable has a value, and create PHI nodes
+/// where they merge.
+/// This isn't precisely SSA-construction though, because the function shape
+/// is pre-defined. If a variable location requires a PHI node, but no
+/// PHI for the relevant values is present in the function (as computed by the
+/// first analysis), the location must be dropped.
+///
+/// Once both are complete, we can pass back over all instructions knowing:
+///  * What _value_ each variable should contain, either defined by an
+///    instruction or where control flow merges, and
+///  * What the location of that value is (if any).
+/// This allows us to create appropriate live-in DBG_VALUEs, and DBG_VALUEs
+/// when a value moves location. After this pass runs, all variable locations
+/// within a block should be specified by DBG_VALUEs within that block,
+/// allowing DbgEntityHistoryCalculator to focus on individual blocks.
+///
+/// This pass is able to go fast because the size of the first
+/// reaching-definition analysis is proportional to the working-set size of
+/// the function, which the compiler tries to keep small. (It's also
+/// proportional to the number of blocks.) Additionally, we repeatedly perform
+/// the second reaching-definition analysis with only the variables and blocks
+/// in a single lexical scope, exploiting their locality.
+///
+/// Determining where PHIs happen is trickier with this approach, and it comes
+/// to a head in the major problem for LiveDebugValues: is a value live-through
+/// a loop, or not? Your garden-variety dataflow analysis aims to build a set
+/// of facts about a function; this analysis, however, needs to generate new
+/// value numbers at joins.
+///
+/// To do this, consider a lattice of all definition values, from instructions
+/// and from PHIs. Each PHI is characterised by the RPO number of the block it
+/// occurs in. Each pair of values A, B can be ordered by RPO(A) < RPO(B):
+/// with non-PHI values at the top, and any PHI value in the last block (by RPO
+/// order) at the bottom.
+///
+/// (Awkwardly: lower down the _lattice_ means a greater RPO _number_. Below,
+/// "rank" always refers to the former.)
+///
+/// At any join, for each register, we consider:
+///  * All incoming values, and
+///  * The PREVIOUS live-in value at this join.
+/// If all incoming values agree: that's the live-in value. If they do not, the
+/// incoming values are ranked according to the partial order, and the NEXT
+/// LOWEST rank after the PREVIOUS live-in value is picked (multiple values of
+/// the same rank are ignored as conflicting). If there are no candidate
+/// values, or if the rank of the live-in would be lower than the rank of the
+/// current block's PHIs, create a new PHI value.
+///
+/// Intuitively: if it's not immediately obvious what value a join should
+/// result in, we iteratively descend from instruction-definitions down through
+/// PHI values, getting closer to the current block each time. If the current
+/// block is a loop head, this ordering effectively searches outer levels of
+/// loops, to find a value that's live-through the current loop.
+///
+/// If there is no value that's live-through this loop, a PHI is created for
+/// this location instead. We can't use a lower-ranked PHI because by
+/// definition it doesn't dominate the current block. We can't create a PHI
+/// value any earlier, because we risk creating a PHI value at a location
+/// where values do not in fact merge, thus misrepresenting the truth, and
+/// failing to find the true live-through value for variable locations.
+///
+/// This algorithm applies to both calculating the availability of values in
+/// the first analysis, and the location of variables in the second. However
+/// for the second we add an extra dimension of pain: creating a variable
+/// location PHI is only valid if, for each incoming edge,
+///  * There is a value for the variable on the incoming edge, and
+///  * All the edges have that value in the same register.
+/// Or put another way: we can only create a variable-location PHI if there is
+/// a matching machine-location PHI, each input to which is the variable's
+/// value in the predecessor block.
+///
+/// To accommodate this difference, each point on the lattice is split in
+/// two: a "proposed" PHI and a "definite" PHI. Any PHI that can immediately
+/// have its location determined is a "definite" PHI, and no further work is
+/// needed. Otherwise, a location that all non-backedge predecessors agree
+/// on is picked and propagated as a "proposed" PHI value. If that PHI value
+/// is truly live-through, it'll appear on the loop backedges on the next
+/// dataflow iteration, after which the block live-in moves to be a "definite"
+/// PHI. If it's not truly live-through, the variable value will be downgraded
+/// further as we explore the lattice, or will remain "proposed" and be
+/// considered invalid once dataflow completes.
+///
+/// ### Terminology
+///
+/// A machine location is a register or spill slot; a value is something
+/// that's defined by an instruction or PHI node; and a variable value is the
+/// value assigned to a variable. A variable location is a machine location
+/// that must contain the appropriate variable value. A value that is a PHI
+/// node is occasionally called an mphi.
+///
+/// The first dataflow problem is the "machine value location" problem,
+/// because we're determining which machine locations contain which values.
+/// The "locations" are constant: what's unknown is what value they contain.
+///
+/// The second dataflow problem (the one for variables) is the "variable value
+/// problem", because it's determining what values a variable has, rather than
+/// what location those values are placed in. Unfortunately, it's not that
+/// simple, because producing a PHI value always involves picking a location.
+/// This is an imperfection that we just have to accept, at least for now.
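+///
+/// As an illustrative sketch of the join described above (the helper names
+/// here are hypothetical, and the proposed/definite split is elided), the
+/// per-location meet amounts to:
+///
+/// \code
+///   // Values rank by the RPO number of their defining block; non-PHI defs
+///   // outrank all PHIs.
+///   if (allIncomingValuesAgree(Incoming))
+///     return Incoming.front();
+///   for (ValueIDNum Cand : rankedBelow(PrevLiveIn, Incoming))
+///     if (uniqueAtItsRank(Cand) && rank(Cand) >= rankOfPHIsIn(CurBlock))
+///       return Cand;
+///   return newMPHI(CurBlock); // Nothing is live-through: merge here.
+/// \endcode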
+///
+/// TODO:
+///   Overlapping fragments
+///   Entry values
+///   Add back DEBUG statements for debugging this
+///   Collect statistics
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/CoalescingBitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/UniqueVector.h"
+#include "llvm/CodeGen/LexicalScopes.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <functional>
+#include <queue>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+#include "LiveDebugValues.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "livedebugvalues"
+
+STATISTIC(NumInserted, "Number of DBG_VALUE instructions inserted");
+STATISTIC(NumRemoved, "Number of DBG_VALUE instructions removed");
+
+// Act more like the VarLoc implementation, by propagating some locations too
+// far and ignoring some transfers.
+static cl::opt<bool> EmulateOldLDV("emulate-old-livedebugvalues", cl::Hidden,
+                                   cl::desc("Act like old LiveDebugValues did"),
+                                   cl::init(false));
+
+// Rely on isStoreToStackSlotPostFE and similar to observe all stack spills.
+static cl::opt<bool>
+    ObserveAllStackops("observe-all-stack-ops", cl::Hidden,
+                       cl::desc("Allow non-kill spill and restores"),
+                       cl::init(false));
+
+namespace {
+
+// The location at which a spilled value resides. It consists of a register and
+// an offset.
+struct SpillLoc {
+  unsigned SpillBase;
+  int SpillOffset;
+  bool operator==(const SpillLoc &Other) const {
+    return SpillBase == Other.SpillBase && SpillOffset == Other.SpillOffset;
+  }
+  bool operator<(const SpillLoc &Other) const {
+    return std::tie(SpillBase, SpillOffset) <
+           std::tie(Other.SpillBase, Other.SpillOffset);
+  }
+};
+
+// This is purely a number that's slightly more strongly typed, to avoid
+// passing around raw integers. Identifies a register or spill slot,
+// numerically.
+enum LocIdx { limin = 0, limax = UINT_MAX };
+
+#define NUM_LOC_BITS 24
+
+/// Unique identifier for a value defined by an instruction, as a value type.
+/// Casts back and forth to a uint64_t. Probably replaceable with something
+/// less bit-constrained.
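+///
+/// For example (illustrative only), packing and unpacking round-trips:
+///
+/// \code
+///   ValueIDNum N = {4, 12, LocIdx(3)}; // Inst 12 of bb 4, defining loc 3.
+///   assert(ValueIDNum::fromU64(N.asU64()) == N);
+/// \endcode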
+/// Each value identifies the instruction and machine location where the value
+/// is defined, although there may be no corresponding machine operand for it
+/// (ex: regmasks clobbering values). The instructions are one-based, and
+/// definitions that are PHIs have instruction number zero.
+///
+/// The obvious limits of a 1M block function or 1M instruction blocks are
+/// problematic; but by that point we should probably have bailed out of
+/// trying to analyse the function.
+class ValueIDNum {
+public:
+  uint64_t BlockNo : 20; /// The block where the def happens.
+  uint64_t InstNo : 20;  /// The instruction where the def happens. One-based,
+                         /// i.e. the distance from the start of the block.
+  LocIdx LocNo : NUM_LOC_BITS; /// The machine location where the def happens.
+
+  uint64_t asU64() const {
+    uint64_t TmpBlock = BlockNo;
+    uint64_t TmpInst = InstNo;
+    return TmpBlock << 44ull | TmpInst << NUM_LOC_BITS | LocNo;
+  }
+
+  static ValueIDNum fromU64(uint64_t v) {
+    LocIdx L = LocIdx(v & 0xFFFFFF); // Low NUM_LOC_BITS bits.
+    return {v >> 44ull, ((v >> NUM_LOC_BITS) & 0xFFFFF), L};
+  }
+
+  bool operator<(const ValueIDNum &Other) const {
+    return asU64() < Other.asU64();
+  }
+
+  bool operator==(const ValueIDNum &Other) const {
+    return std::tie(BlockNo, InstNo, LocNo) ==
+           std::tie(Other.BlockNo, Other.InstNo, Other.LocNo);
+  }
+
+  bool operator!=(const ValueIDNum &Other) const { return !(*this == Other); }
+
+  std::string asString(const std::string &mlocname) const {
+    return Twine("bb ")
+        .concat(Twine(BlockNo).concat(Twine(" inst ").concat(
+            Twine(InstNo).concat(Twine(" loc ").concat(Twine(mlocname))))))
+        .str();
+  }
+};
+
+} // end anonymous namespace
+
+// Boilerplate DenseMapInfo for ValueIDNum.
+namespace llvm {
+template <> struct DenseMapInfo<ValueIDNum> {
+  // NB, there's a risk of overlap of uint64_max with legitimate numbering if
+  // there are very many machine locations. Fix in the future by not bit
+  // packing so hard.
+  static const uint64_t MaxVal = std::numeric_limits<uint64_t>::max();
+
+  static inline ValueIDNum getEmptyKey() {
+    return ValueIDNum::fromU64(MaxVal);
+  }
+
+  static inline ValueIDNum getTombstoneKey() {
+    return ValueIDNum::fromU64(MaxVal - 1);
+  }
+
+  static unsigned getHashValue(ValueIDNum num) {
+    return hash_value(num.asU64());
+  }
+
+  static bool isEqual(const ValueIDNum &A, const ValueIDNum &B) {
+    return A == B;
+  }
+};
+
+} // end namespace llvm
+
+namespace {
+
+/// Meta qualifiers for a value. Pair of whatever expression is used to
+/// qualify the value, and a Boolean of whether or not it's indirect.
+class DbgValueProperties {
+public:
+  DbgValueProperties(const DIExpression *DIExpr, bool Indirect)
+      : DIExpr(DIExpr), Indirect(Indirect) {}
+
+  DbgValueProperties(const DbgValueProperties &Cpy)
+      : DIExpr(Cpy.DIExpr), Indirect(Cpy.Indirect) {}
+
+  bool operator==(const DbgValueProperties &Other) const {
+    return DIExpr == Other.DIExpr && Indirect == Other.Indirect;
+  }
+
+  bool operator!=(const DbgValueProperties &Other) const {
+    return !(*this == Other);
+  }
+
+  const DIExpression *DIExpr;
+  bool Indirect;
+};
+
+/// Tracker for what values are in machine locations. Listens to the Things
+/// being Done by various instructions, and maintains a table of what machine
+/// locations have what values (as defined by a ValueIDNum).
+///
+/// There are potentially a much larger number of machine locations on the
+/// target machine than the actual working-set size of the function. On x86
+/// for example, we're extremely unlikely to want to track values through
+/// control or debug registers. To avoid doing so, MLocTracker has several
+/// layers of indirection going on, with two kinds of ``location'':
+///  * A LocID uniquely identifies a register or spill location, with a
+///    predictable value.
+///  * A LocIdx is a key (in the database sense) for a LocID and a ValueIDNum.
+/// Whenever a location is def'd or used by a MachineInstr, we automagically
+/// create a new LocIdx for a location, but not otherwise. This ensures we
+/// only account for locations that are actually used or defined. The cost is
+/// another vector lookup (of LocID -> LocIdx) over any other implementation.
+/// This is fairly cheap, and the compiler tries to reduce the working-set at
+/// any one time in the function anyway.
+///
+/// Register mask operands completely blow this out of the water; I've just
+/// piled hacks on top of hacks to get around that.
+///
+/// A zero LocIdx is reserved for "no value" or "no location".
+class MLocTracker {
+public:
+  MachineFunction &MF;
+  const TargetInstrInfo &TII;
+  const TargetRegisterInfo &TRI;
+  const TargetLowering &TLI;
+
+  /// "Map" of LocIdxes to the ValueIDNums that they store. This is tightly
+  /// packed, entries only exist for locations that are being tracked.
+  std::vector<ValueIDNum> LocIdxToIDNum;
+
+  /// "Map" of machine location IDs (i.e., raw register or spill number) to
+  /// the LocIdx key / number for that location. There are always at least as
+  /// many as the number of registers on the target -- if the value in the
+  /// register is not being tracked, then the LocIdx value will be zero. New
+  /// entries are appended if a new spill slot begins being tracked.
+  /// This, and the corresponding reverse map, persist for the analysis of the
+  /// whole function, and are necessary for decoding various vectors of
+  /// values.
+  std::vector<LocIdx> LocIDToLocIdx;
+
+  /// Inverse map of LocIDToLocIdx.
+  std::map<LocIdx, unsigned> LocIdxToLocID;
+
+  /// Unique-ification of spill slots. Used to number them -- their LocID
+  /// number is the index in SpillLocs minus one plus NumRegs.
+  UniqueVector<SpillLoc> SpillLocs;
+
+  // If we discover a new machine location, assign it an mphi with this
+  // block number.
+  unsigned CurBB;
+
+  /// Cached local copy of the number of registers the target has.
+  unsigned NumRegs;
+
+  /// Collection of register mask operands that have been observed. Second
+  /// part of pair indicates the instruction that they happened in. Used to
+  /// reconstruct where defs happened if we start tracking a location later
+  /// on.
+  SmallVector<std::pair<const MachineOperand *, unsigned>, 32> Masks;
+
+  MLocTracker(MachineFunction &MF, const TargetInstrInfo &TII,
+              const TargetRegisterInfo &TRI, const TargetLowering &TLI)
+      : MF(MF), TII(TII), TRI(TRI), TLI(TLI) {
+    NumRegs = TRI.getNumRegs();
+    reset();
+    LocIdxToIDNum.push_back({0, 0, LocIdx(0)});
+    LocIDToLocIdx.resize(NumRegs);
+    LocIDToLocIdx.assign(NumRegs, LocIdx(0));
+    LocIDToLocIdx[0] = LocIdx(0);
+    LocIdxToLocID[LocIdx(0)] = 0;
+    assert(NumRegs < (1u << NUM_LOC_BITS)); // Detect bit packing failure
+  }
+
+  /// Produce location ID number for indexing LocIDToLocIdx. Takes the
+  /// register or spill number, and a flag for whether it's a spill or not.
+  unsigned getLocID(unsigned RegOrSpill, bool isSpill) {
+    return (isSpill) ? RegOrSpill + NumRegs - 1 : RegOrSpill;
+  }
+
+  /// Accessor for reading the value at Idx.
+  ValueIDNum getNumAtPos(LocIdx Idx) const {
+    assert(Idx < LocIdxToIDNum.size());
+    return LocIdxToIDNum[Idx];
+  }
+
+  unsigned getNumLocs(void) const { return LocIdxToIDNum.size(); }
+
+  /// Reset all locations to contain a PHI value at the designated block. Used
+  /// sometimes for actual PHI values, other times to indicate the block entry
+  /// value (before any more information is known).
+  void setMPhis(unsigned NewCurBB) {
+    CurBB = NewCurBB;
+    for (unsigned ID = 1; ID < LocIdxToIDNum.size(); ++ID) {
+      LocIdxToIDNum[LocIdx(ID)] = {CurBB, 0, LocIdx(ID)};
+    }
+  }
+
+  /// Load values for each location from an array of ValueIDNums. Take the
+  /// current bbnum just in case we read a value from a hitherto untouched
+  /// register.
+  void loadFromArray(uint64_t *Locs, unsigned NewCurBB) {
+    CurBB = NewCurBB;
+    // Load the live-in value for every tracked location.
+    for (unsigned ID = 1; ID < LocIdxToIDNum.size(); ++ID) {
+      LocIdxToIDNum[LocIdx(ID)] = ValueIDNum::fromU64(Locs[ID]);
+    }
+  }
+
+  /// Wipe records of what locations have what values.
+  void reset(void) {
+    memset(&LocIdxToIDNum[0], 0, LocIdxToIDNum.size() * sizeof(ValueIDNum));
+    Masks.clear();
+  }
+
+  /// Clear all data. Destroys the LocID <=> LocIdx map, which makes most of
+  /// the information in this pass uninterpretable.
+  void clear(void) {
+    reset();
+    LocIDToLocIdx.clear();
+    LocIdxToLocID.clear();
+    LocIdxToIDNum.clear();
+    // SpillsToMLocs.reset(); XXX can't reset?
+    SpillLocs = decltype(SpillLocs)();
+
+    LocIDToLocIdx.resize(NumRegs);
+    memset(&LocIDToLocIdx[0], 0, NumRegs * sizeof(LocIdx));
+  }
+
+  /// Set a location to a certain value.
+  void setMLoc(LocIdx L, ValueIDNum Num) {
+    assert(L < LocIdxToIDNum.size());
+    LocIdxToIDNum[L] = Num;
+  }
+
+  /// Lookup a potentially untracked register ID, storing its LocIdx into Ref.
+  /// If ID was not tracked, initialize it to either an mphi value
+  /// representing a live-in, or a recent register mask clobber.
+  void bumpRegister(unsigned ID, LocIdx &Ref) {
+    assert(ID != 0);
+    if (Ref == 0) {
+      LocIdx NewIdx = LocIdx(LocIdxToIDNum.size());
+      Ref = NewIdx;
+
+      // Default: it's an mphi.
+      ValueIDNum ValNum = {CurBB, 0, NewIdx};
+      // Was this reg ever touched by a regmask?
+      for (auto Rit = Masks.rbegin(); Rit != Masks.rend(); ++Rit) {
+        if (Rit->first->clobbersPhysReg(ID)) {
+          // There was an earlier def we skipped.
+          ValNum = {CurBB, Rit->second, NewIdx};
+          break;
+        }
+      }
+
+      LocIdxToIDNum.push_back(ValNum);
+      LocIdxToLocID[NewIdx] = ID;
+    }
+  }
+
+  /// Record a definition of the specified register at the given block / inst.
+  /// This doesn't take a ValueIDNum, because the definition and its location
+  /// are synonymous.
+  void defReg(Register R, unsigned BB, unsigned Inst) {
+    unsigned ID = getLocID(R, false);
+    LocIdx &Idx = LocIDToLocIdx[ID];
+    bumpRegister(ID, Idx);
+    ValueIDNum ValueID = {BB, Inst, Idx};
+    LocIdxToIDNum[Idx] = ValueID;
+  }
+
+  /// Set a register to a value number. To be used if the value number is
+  /// known in advance.
+  void setReg(Register R, ValueIDNum ValueID) {
+    unsigned ID = getLocID(R, false);
+    LocIdx &Idx = LocIDToLocIdx[ID];
+    bumpRegister(ID, Idx);
+    LocIdxToIDNum[Idx] = ValueID;
+  }
+
+  ValueIDNum readReg(Register R) {
+    unsigned ID = getLocID(R, false);
+    LocIdx &Idx = LocIDToLocIdx[ID];
+    bumpRegister(ID, Idx);
+    return LocIdxToIDNum[Idx];
+  }
+
+  /// Reset a register value to zero / empty. Needed to replicate the VarLoc
+  /// implementation where a copy to/from a register effectively clears the
+  /// contents of the source register. (Values can only have one machine
+  /// location in VarLocBasedImpl.)
+  void wipeRegister(Register R) {
+    unsigned ID = getLocID(R, false);
+    LocIdx Idx = LocIDToLocIdx[ID];
+    LocIdxToIDNum[Idx] = {0, 0, LocIdx(0)};
+  }
+
+  /// Determine the LocIdx of an existing register.
+  LocIdx getRegMLoc(Register R) {
+    unsigned ID = getLocID(R, false);
+    return LocIDToLocIdx[ID];
+  }
+
+  /// Record a RegMask operand being executed. Defs any register we currently
+  /// track, stores a pointer to the mask in case we have to account for it
+  /// later.
+  void writeRegMask(const MachineOperand *MO, unsigned CurBB,
+                    unsigned InstID) {
+    // Ensure SP exists, so that we don't override it later.
+    unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
+    if (SP) {
+      unsigned ID = getLocID(SP, false);
+      LocIdx &Idx = LocIDToLocIdx[ID];
+      bumpRegister(ID, Idx);
+    }
+
+    // Def anything we already have that isn't preserved.
+    for (auto &P : LocIdxToLocID) {
+      // Don't clobber SP, even if the mask says it's clobbered.
+      if (P.second != 0 && P.second < NumRegs && P.second != SP &&
+          MO->clobbersPhysReg(P.second))
+        defReg(P.second, CurBB, InstID);
+    }
+    Masks.push_back(std::make_pair(MO, InstID));
+  }
+
+  /// Set the value stored in a spill slot.
+  void setSpill(SpillLoc L, ValueIDNum ValueID) {
+    unsigned SpillID = SpillLocs.idFor(L);
+    if (SpillID == 0) {
+      SpillID = SpillLocs.insert(L);
+      LocIDToLocIdx.push_back(LocIdx(0));
+      unsigned LocID = getLocID(SpillID, true);
+      LocIdx Idx = LocIdx(LocIdxToIDNum.size()); // New idx
+      LocIDToLocIdx[LocID] = Idx;
+      LocIdxToLocID[Idx] = LocID;
+      LocIdxToIDNum.push_back(ValueID);
+      assert(Idx < (1u << NUM_LOC_BITS));
+    } else {
+      unsigned LocID = getLocID(SpillID, true);
+      LocIdx Idx = LocIDToLocIdx[LocID];
+      LocIdxToIDNum[Idx] = ValueID;
+    }
+  }
+
+  /// Read whatever value is in a spill slot, or zero if it isn't tracked.
+  ValueIDNum readSpill(SpillLoc L) {
+    unsigned SpillID = SpillLocs.idFor(L);
+    if (SpillID == 0)
+      // Returning no location -> $noreg, no value.
+      return {0, 0, LocIdx(0)};
+
+    unsigned LocID = getLocID(SpillID, true);
+    LocIdx Idx = LocIDToLocIdx[LocID];
+    return LocIdxToIDNum[Idx];
+  }
+
+  /// Determine the LocIdx of a spill slot.
+  LocIdx getSpillMLoc(SpillLoc L) {
+    unsigned SpillID = SpillLocs.idFor(L);
+    if (SpillID == 0)
+      return LocIdx(0);
+    unsigned LocNo = getLocID(SpillID, true);
+    return LocIDToLocIdx[LocNo];
+  }
+
+  /// Return true if Idx is a spill machine location.
+  bool isSpill(LocIdx Idx) const {
+    auto IDIt = LocIdxToLocID.find(Idx);
+    assert(IDIt != LocIdxToLocID.end());
+    return IDIt->second >= NumRegs;
+  }
+
+  std::string LocIdxToName(LocIdx Idx) const {
+    auto IDIt = LocIdxToLocID.find(Idx);
+    assert(IDIt != LocIdxToLocID.end());
+    unsigned ID = IDIt->second;
+    if (ID >= NumRegs)
+      return Twine("slot ").concat(Twine(ID - NumRegs)).str();
+    else
+      return TRI.getRegAsmName(ID).str();
+  }
+
+  std::string IDAsString(const ValueIDNum &Num) const {
+    std::string DefName = LocIdxToName(Num.LocNo);
+    return Num.asString(DefName);
+  }
+
+  LLVM_DUMP_METHOD
+  void dump() const {
+    for (unsigned int ID = 0; ID < LocIdxToIDNum.size(); ++ID) {
+      auto &ValueID = LocIdxToIDNum[ID];
+      if (ValueID.LocNo == 0)
+        continue;
+      std::string MLocName = LocIdxToName(ValueID.LocNo);
+      std::string DefName = ValueID.asString(MLocName);
+      dbgs() << LocIdxToName(LocIdx(ID)) << " --> " << DefName << "\n";
+    }
+  }
+
+  LLVM_DUMP_METHOD
+  void dump_mloc_map() const {
+    for (unsigned I = 0; I < LocIdxToIDNum.size(); ++I) {
+      std::string MLocName = LocIdxToName(LocIdx(I));
+      dbgs() << "Idx " << I << " " << MLocName << "\n";
+    }
+  }
+
+  /// Create a DBG_VALUE based on machine location \p MLoc. Qualify it with
+  /// the information in \p Properties, for variable Var. Don't insert it
+  /// anywhere, just return the builder for it.
+  MachineInstrBuilder emitLoc(LocIdx MLoc, const DebugVariable &Var,
+                              const DbgValueProperties &Properties) {
+    DebugLoc DL =
+        DebugLoc::get(0, 0, Var.getVariable()->getScope(), Var.getInlinedAt());
+    auto MIB = BuildMI(MF, DL, TII.get(TargetOpcode::DBG_VALUE));
+
+    const DIExpression *Expr = Properties.DIExpr;
+    unsigned Loc = LocIdxToLocID[MLoc];
+    if (Loc >= NumRegs) {
+      const SpillLoc &Spill = SpillLocs[Loc - NumRegs + 1];
+      Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset,
+                                   Spill.SpillOffset);
+      unsigned Base = Spill.SpillBase;
+      MIB.addReg(Base, RegState::Debug);
+      MIB.addImm(0);
+    } else {
+      MIB.addReg(Loc, RegState::Debug);
+      if (Properties.Indirect)
+        MIB.addImm(0);
+      else
+        MIB.addReg(0, RegState::Debug);
+    }
+
+    MIB.addMetadata(Var.getVariable());
+    MIB.addMetadata(Expr);
+    return MIB;
+  }
+};
+
+/// Class recording the (high level) _value_ of a variable. Identifies either
+/// the value of the variable as a ValueIDNum, or a constant MachineOperand.
+/// This class also stores meta-information about how the value is qualified.
+/// Used to reason about variable values when performing the second
+/// (DebugVariable specific) dataflow analysis.
+class DbgValue {
+public:
+  union {
+    /// If Kind is Def, the value number that this value is based on.
+    ValueIDNum ID;
+    /// If Kind is Const, the MachineOperand defining this value.
+    MachineOperand MO;
+  };
+  /// Qualifiers for the ValueIDNum above.
+  DbgValueProperties Properties;
+
+  typedef enum {
+    Def,      // This value is defined by an inst, or is a PHI value.
+    Const,    // A constant value contained in the MachineOperand field.
+    Proposed, // This is a tentative PHI value, which may be confirmed or
+              // invalidated later.
+    NoVal     // Empty DbgValue. ID.BlockNo stores which block this was
+              // generated in.
+  } KindT;
+  /// Discriminator for whether this is a constant or an in-program value.
+  KindT Kind;
+
+  void dump(const MLocTracker *MTrack) const {
+    if (Kind == Const) {
+      MO.dump();
+    } else if (Kind == NoVal) {
+      dbgs() << "NoVal(" << ID.BlockNo << ")";
+    } else if (Kind == Proposed) {
+      dbgs() << "VPHI(" << MTrack->IDAsString(ID) << ")";
+    } else {
+      assert(Kind == Def);
+      dbgs() << MTrack->IDAsString(ID);
+    }
+    if (Properties.Indirect)
+      dbgs() << " indir";
+    if (Properties.DIExpr)
+      dbgs() << " " << *Properties.DIExpr;
+  }
+
+  bool operator==(const DbgValue &Other) const {
+    if (Kind != Other.Kind)
+      return false;
+    if (Kind == Const && !MO.isIdenticalTo(Other.MO))
+      return false;
+    else if (Kind == Def && ID != Other.ID)
+      return false;
+    else if (Kind == Proposed && ID != Other.ID)
+      return false;
+    else if (Kind == NoVal && ID != Other.ID)
+      return false;
+
+    return Properties == Other.Properties;
+  }
+
+  bool operator!=(const DbgValue &Other) const { return !(*this == Other); }
+};
+
+/// Types for recording sets of variable fragments that overlap. For a given
+/// local variable, we record all other fragments of that variable that could
+/// overlap it, to reduce search time.
+using FragmentOfVar =
+    std::pair<const DILocalVariable *, DIExpression::FragmentInfo>;
+using OverlapMap =
+    DenseMap<FragmentOfVar, SmallVector<DIExpression::FragmentInfo, 1>>;
+
+/// Collection of DBG_VALUEs observed when traversing a block. Records each
+/// variable and the value the DBG_VALUE refers to. Requires the machine value
+/// location dataflow algorithm to have run already, so that values can be
+/// identified.
+class VLocTracker {
+public:
+  /// Map DebugVariable to the latest Value it's defined to have.
+  /// Needs to be a MapVector because we determine order-in-the-input-MIR from
+  /// the order in this thing.
+  MapVector<DebugVariable, DbgValue> Vars;
+  DenseMap<DebugVariable, const DILocation *> Scopes;
+  MachineBasicBlock *MBB;
+
+public:
+  VLocTracker() {}
+
+  void defVar(const MachineInstr &MI, ValueIDNum ID) {
+    // XXX skipping overlapping fragments for now.
+    assert(MI.isDebugValue());
+    DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
+                      MI.getDebugLoc()->getInlinedAt());
+    DbgValueProperties Properties(
+        {MI.getDebugExpression(), MI.getOperand(1).isImm()});
+    DbgValue Rec = {{ID}, Properties, DbgValue::Def};
+
+    // Attempt insertion; overwrite if it's already mapped.
+    auto Result = Vars.insert(std::make_pair(Var, Rec));
+    if (!Result.second)
+      Result.first->second = Rec;
+    Scopes[Var] = MI.getDebugLoc().get();
+  }
+
+  void defVar(const MachineInstr &MI, const MachineOperand &MO) {
+    // XXX skipping overlapping fragments for now.
+    assert(MI.isDebugValue());
+    DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
+                      MI.getDebugLoc()->getInlinedAt());
+    DbgValueProperties Properties(MI.getDebugExpression(),
+                                  MI.getOperand(1).isImm());
+    ValueIDNum EmptyValue = {0, 0, LocIdx(0)};
+    DbgValue Rec = {{EmptyValue}, Properties, DbgValue::Const};
+    Rec.MO = MO;
+
+    // Attempt insertion; overwrite if it's already mapped.
+    auto Result = Vars.insert(std::make_pair(Var, Rec));
+    if (!Result.second)
+      Result.first->second = Rec;
+    Scopes[Var] = MI.getDebugLoc().get();
+  }
+};
+
+/// Tracker for converting machine value locations and variable values into
+/// variable locations (the output of LiveDebugValues), recorded as DBG_VALUEs
+/// specifying block live-in locations and transfers within blocks.
+///
+/// Operating on a per-block basis, this class takes a (pre-loaded)
+/// MLocTracker and must be initialized with the set of variable values that
+/// are live-in to the block. The caller then repeatedly calls process().
+/// TransferTracker picks out variable locations for the live-in variable
+/// values (if there _is_ a location) and creates the corresponding
+/// DBG_VALUEs. Then, as the block is stepped through, transfers of values
+/// between machine locations are identified and, if profitable, a DBG_VALUE
+/// is created.
+///
+/// This is where debug use-before-defs would be resolved: a variable with an
+/// unavailable value could materialize in the middle of a block, when the
+/// value becomes available. Or, we could detect clobbers and re-specify the
+/// variable in a backup location. (XXX these are unimplemented.)
+class TransferTracker {
+public:
+  const TargetInstrInfo *TII;
+  /// This machine location tracker is assumed to always contain the
+  /// up-to-date value mapping for all machine locations. TransferTracker only
+  /// reads information from it. (XXX make it const?)
+  MLocTracker *MTracker;
+  MachineFunction &MF;
+
+  /// Record of all changes in variable locations at a block position.
+  /// Awkwardly, we allow inserting either before or after the point: MBB !=
+  /// nullptr indicates it's before, otherwise after.
+  struct Transfer {
+    MachineBasicBlock::iterator Pos; /// Position to insert DBG_VALUEs.
+    MachineBasicBlock *MBB; /// Non-null if we should insert before Pos.
+    SmallVector<MachineInstr *, 4> Insts; /// Vector of DBG_VALUEs to insert.
+  };
+
+  typedef std::pair<LocIdx, DbgValueProperties> LocAndProperties;
+
+  /// Collection of transfers (DBG_VALUEs) to be inserted.
+  SmallVector<Transfer, 32> Transfers;
+
+  /// Local cache of what-value-is-in-what-LocIdx. Used to identify
+  /// differences between TransferTracker's view of variable locations and
+  /// MLocTracker's. For example, MLocTracker observes all clobbers, but
+  /// TransferTracker lazily does not.
+  std::vector<ValueIDNum> VarLocs;
+
+  /// Map from LocIdxes to which DebugVariables are based on that location.
+  /// Maintained while stepping through the block. Not accurate if
+  /// VarLocs[Idx] != MTracker->LocIdxToIDNum[Idx].
+  std::map<LocIdx, SmallSet<DebugVariable, 4>> ActiveMLocs;
+
+  /// Map from DebugVariable to its current location and qualifying meta
+  /// information. To be used in conjunction with ActiveMLocs to construct
+  /// enough information for the DBG_VALUEs for a particular LocIdx.
+  DenseMap<DebugVariable, LocAndProperties> ActiveVLocs;
+
+  /// Temporary cache of DBG_VALUEs to be entered into the Transfers
+  /// collection.
+  SmallVector<MachineInstr *, 4> PendingDbgValues;
+
+  const TargetRegisterInfo &TRI;
+  const BitVector &CalleeSavedRegs;
+
+  TransferTracker(const TargetInstrInfo *TII, MLocTracker *MTracker,
+                  MachineFunction &MF, const TargetRegisterInfo &TRI,
+                  const BitVector &CalleeSavedRegs)
+      : TII(TII), MTracker(MTracker), MF(MF), TRI(TRI),
+        CalleeSavedRegs(CalleeSavedRegs) {}
+
+  /// Load object with live-in variable values. \p MLocs contains the live-in
+  /// values in each machine location, while \p VLocs contains the live-in
+  /// variable values. This method picks variable locations for the live-in
+  /// variables, creates DBG_VALUEs and puts them in #Transfers, then prepares
+  /// the other object fields to track variable locations as we step through
+  /// the block.
+  /// FIXME: could just examine mloctracker instead of passing in \p MLocs?
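+  ///
+  /// A sketch of the intended per-block driver sequence (simplified; names
+  /// such as BBNum and LiveInVLocs are hypothetical here):
+  ///
+  /// \code
+  ///   TTracker->loadInlocs(MBB, MInLocs[BBNum], LiveInVLocs, NumLocs);
+  ///   for (MachineInstr &MI : MBB)
+  ///     process(MI); // Feeds redefVar / transferMlocs / clobberMloc.
+  /// \endcode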
+  void loadInlocs(MachineBasicBlock &MBB, uint64_t *MLocs,
+                  SmallVectorImpl<std::pair<DebugVariable, DbgValue>> &VLocs,
+                  unsigned NumLocs) {
+    ActiveMLocs.clear();
+    ActiveVLocs.clear();
+    VarLocs.clear();
+    VarLocs.resize(NumLocs);
+
+    auto isCalleeSaved = [&](LocIdx L) {
+      unsigned Reg = MTracker->LocIdxToLocID[L];
+      for (MCRegAliasIterator RAI(Reg, &TRI, true); RAI.isValid(); ++RAI)
+        if (CalleeSavedRegs.test(*RAI))
+          return true;
+      return false;
+    };
+
+    // Map of the preferred location for each value.
+    DenseMap<ValueIDNum, LocIdx> ValueToLoc;
+
+    // Produce a map of value numbers to the current machine locs they live
+    // in. When emulating VarLocBasedImpl, there should only be one
+    // location; when not, we get to pick.
+    for (unsigned Idx = 1; Idx < NumLocs; ++Idx) {
+      auto VNum = ValueIDNum::fromU64(MLocs[Idx]);
+      VarLocs[Idx] = VNum;
+      auto it = ValueToLoc.find(VNum);
+      // If there's no location for this value yet, or the location we have
+      // is a spill or isn't a preferred (callee-saved) register, then pick
+      // this location instead.
+      if (it == ValueToLoc.end() || MTracker->isSpill(it->second) ||
+          !isCalleeSaved(it->second))
+        ValueToLoc[VNum] = LocIdx(Idx);
+    }
+
+    // Now map variables to their picked LocIdxes.
+    for (auto Var : VLocs) {
+      if (Var.second.Kind == DbgValue::Const) {
+        PendingDbgValues.push_back(
+            emitMOLoc(Var.second.MO, Var.first, Var.second.Properties));
+        continue;
+      }
+
+      // If the value has no location, we can't make a variable location.
+      auto ValuesPreferredLoc = ValueToLoc.find(Var.second.ID);
+      if (ValuesPreferredLoc == ValueToLoc.end())
+        continue;
+
+      LocIdx M = ValuesPreferredLoc->second;
+      auto NewValue = std::make_pair(M, Var.second.Properties);
+      auto Result = ActiveVLocs.insert(std::make_pair(Var.first, NewValue));
+      if (!Result.second)
+        Result.first->second = NewValue;
+      ActiveMLocs[M].insert(Var.first);
+      assert(M != 0);
+      PendingDbgValues.push_back(
+          MTracker->emitLoc(M, Var.first, Var.second.Properties));
+    }
+    flushDbgValues(MBB.begin(), &MBB);
+  }
+
+  /// Helper to move created DBG_VALUEs into the Transfers collection.
+  void flushDbgValues(MachineBasicBlock::iterator Pos,
+                      MachineBasicBlock *MBB) {
+    if (PendingDbgValues.size() > 0) {
+      Transfers.push_back({Pos, MBB, PendingDbgValues});
+      PendingDbgValues.clear();
+    }
+  }
+
+  /// Handle a DBG_VALUE within a block. Terminate the variable's current
+  /// location, and record the value its DBG_VALUE refers to, so that we can
+  /// detect location transfers later on.
+  void redefVar(const MachineInstr &MI) {
+    DebugVariable Var(MI.getDebugVariable(), MI.getDebugExpression(),
+                      MI.getDebugLoc()->getInlinedAt());
+    const MachineOperand &MO = MI.getOperand(0);
+
+    // Erase any previous location.
+    auto It = ActiveVLocs.find(Var);
+    if (It != ActiveVLocs.end()) {
+      ActiveMLocs[It->second.first].erase(Var);
+    }
+
+    // Insert a new variable location. Ignore non-register locations, we
+    // don't transfer those, and can't currently describe spill locs
+    // independently of regs.
+    // (This is because a spill location is a DBG_VALUE of the stack pointer.)
+    if (!MO.isReg() || MO.getReg() == 0) {
+      if (It != ActiveVLocs.end())
+        ActiveVLocs.erase(It);
+      return;
+    }
+
+    Register Reg = MO.getReg();
+    LocIdx MLoc = MTracker->getRegMLoc(Reg);
+    DbgValueProperties Properties(MI.getDebugExpression(),
+                                  MI.getOperand(1).isImm());
+
+    // Check whether our local copy of values-by-location in #VarLocs is out
+    // of date. Wipe old tracking data for the location if it's been clobbered
+    // in the meantime.
+    if (MTracker->getNumAtPos(MLoc) != VarLocs[MLoc]) {
+      for (auto &P : ActiveMLocs[MLoc]) {
+        ActiveVLocs.erase(P);
+      }
+      ActiveMLocs[MLoc].clear();
+      VarLocs[MLoc] = MTracker->getNumAtPos(MLoc);
+    }
+
+    ActiveMLocs[MLoc].insert(Var);
+    if (It == ActiveVLocs.end()) {
+      ActiveVLocs.insert(
+          std::make_pair(Var, std::make_pair(MLoc, Properties)));
+    } else {
+      It->second.first = MLoc;
+      It->second.second = Properties;
+    }
+  }
+
+  /// Explicitly terminate variable locations based on \p MLoc. Creates undef
+  /// DBG_VALUEs for any variables that were located there, and clears
+  /// #ActiveMLocs / #ActiveVLocs tracking information for that location.
+  void clobberMloc(LocIdx MLoc, MachineBasicBlock::iterator Pos) {
+    assert(MTracker->isSpill(MLoc));
+    auto ActiveMLocIt = ActiveMLocs.find(MLoc);
+    if (ActiveMLocIt == ActiveMLocs.end())
+      return;
+
+    VarLocs[MLoc] = ValueIDNum{0, 0, LocIdx(0)};
+
+    for (auto &Var : ActiveMLocIt->second) {
+      auto ActiveVLocIt = ActiveVLocs.find(Var);
+      // Create an undef. We can't feed in a nullptr DIExpression alas, so
+      // use the variable's last expression.
+      const DIExpression *Expr = ActiveVLocIt->second.second.DIExpr;
+      LocIdx Idx = LocIdx(0);
+      DbgValueProperties Properties(Expr, false);
+      PendingDbgValues.push_back(MTracker->emitLoc(Idx, Var, Properties));
+      ActiveVLocs.erase(ActiveVLocIt);
+    }
+    flushDbgValues(Pos, nullptr);
+
+    ActiveMLocIt->second.clear();
+  }
+
+  /// Transfer variables based on \p Src to be based on \p Dst. This handles
+  /// both register copies as well as spills and restores. Creates DBG_VALUEs
+  /// describing the movement.
+  void transferMlocs(LocIdx Src, LocIdx Dst, MachineBasicBlock::iterator Pos) {
+    // Does Src still contain the value num we expect? If not, it's been
+    // clobbered in the meantime, and our variable locations are stale.
+    if (VarLocs[Src] != MTracker->getNumAtPos(Src))
+      return;
+
+    // assert(ActiveMLocs[Dst].size() == 0);
+    //^^^ Legitimate scenario on account of un-clobbered slot being assigned
+    // to?
+    ActiveMLocs[Dst] = ActiveMLocs[Src];
+    VarLocs[Dst] = VarLocs[Src];
+
+    // For each variable based on Src, create a location at Dst.
+    for (auto &Var : ActiveMLocs[Src]) {
+      auto ActiveVLocIt = ActiveVLocs.find(Var);
+      assert(ActiveVLocIt != ActiveVLocs.end());
+      ActiveVLocIt->second.first = Dst;
+
+      assert(Dst != 0);
+      MachineInstr *MI =
+          MTracker->emitLoc(Dst, Var, ActiveVLocIt->second.second);
+      PendingDbgValues.push_back(MI);
+    }
+    ActiveMLocs[Src].clear();
+    flushDbgValues(Pos, nullptr);
+
+    // XXX XXX XXX "pretend to be old LDV" means dropping all tracking data
+    // about the old location.
+    if (EmulateOldLDV)
+      VarLocs[Src] = ValueIDNum{0, 0, LocIdx(0)};
+  }
+
+  MachineInstrBuilder emitMOLoc(const MachineOperand &MO,
+                                const DebugVariable &Var,
+                                const DbgValueProperties &Properties) {
+    DebugLoc DL =
+        DebugLoc::get(0, 0, Var.getVariable()->getScope(), Var.getInlinedAt());
+    auto MIB = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE));
+    MIB.add(MO);
+    if (Properties.Indirect)
+      MIB.addImm(0);
+    else
+      MIB.addReg(0);
+    MIB.addMetadata(Var.getVariable());
+    MIB.addMetadata(Properties.DIExpr);
+    return MIB;
+  }
+};
+
+class InstrRefBasedLDV : public LDVImpl {
+private:
+  using FragmentInfo = DIExpression::FragmentInfo;
+  using OptFragmentInfo = Optional<FragmentInfo>;
+
+  // Helper while building OverlapMap, a map of all fragments seen for a given
+  // DILocalVariable.
+  using VarToFragments =
+      DenseMap<const DILocalVariable *, SmallSet<FragmentInfo, 4>>;
+
+  /// Machine location/value transfer function, a mapping of which locations
+  /// are assigned which new values.
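+  /// For example (illustrative), if instruction 12 of block 4 defines $rax,
+  /// that block's transfer function would contain the entry:
+  ///
+  /// \code
+  ///   { LocIdx-of($rax) -> ValueIDNum{4, 12, LocIdx-of($rax)} }
+  /// \endcode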
+  typedef std::map<LocIdx, ValueIDNum> MLocTransferMap;
+
+  /// Live in/out structure for the variable values: a per-block map of
+  /// variables to their values. XXX, better name?
+  typedef DenseMap<MachineBasicBlock *, DenseMap<DebugVariable, DbgValue> *>
+      LiveIdxT;
+
+  typedef std::pair<DebugVariable, DbgValue> VarAndLoc;
+
+  /// Type for a live-in value: the predecessor block, and its value.
+  typedef std::pair<MachineBasicBlock *, DbgValue *> InValueT;
+
+  /// Vector (per block) of a collection (inner smallvector) of live-ins.
+  /// Used as the result type for the variable value dataflow problem.
+  typedef SmallVector<SmallVector<VarAndLoc, 8>, 8> LiveInsT;
+
+  const TargetRegisterInfo *TRI;
+  const TargetInstrInfo *TII;
+  const TargetFrameLowering *TFI;
+  BitVector CalleeSavedRegs;
+  LexicalScopes LS;
+  TargetPassConfig *TPC;
+
+  /// Object to track machine locations as we step through a block. Could
+  /// probably be a field rather than a pointer, as it's always used.
+  MLocTracker *MTracker;
+
+  /// Number of the current block LiveDebugValues is stepping through.
+  unsigned CurBB;
+
+  /// Number of the current instruction LiveDebugValues is evaluating.
+  unsigned CurInst;
+
+  /// Variable tracker -- listens to DBG_VALUEs occurring as InstrRefBasedImpl
+  /// steps through a block. Reads the values at each location from the
+  /// MLocTracker object.
+  VLocTracker *VTracker;
+
+  /// Tracker for transfers, listens to DBG_VALUEs and transfers of values
+  /// between locations during stepping, creates new DBG_VALUEs when values
+  /// move location.
+  TransferTracker *TTracker;
+
+  /// Blocks which are artificial, i.e. blocks which exclusively contain
+  /// instructions without DebugLocs, or with line 0 locations.
+  SmallPtrSet<const MachineBasicBlock *, 16> ArtificialBlocks;
+
+  // Mapping of blocks to and from their RPOT order.
+  DenseMap<unsigned int, MachineBasicBlock *> OrderToBB;
+  DenseMap<MachineBasicBlock *, unsigned int> BBToOrder;
+  DenseMap<unsigned, unsigned> BBNumToRPO;
+
+  // Map of overlapping variable fragments.
+  OverlapMap OverlapFragments;
+  VarToFragments SeenFragments;
+
+  /// Tests whether this instruction is a spill to a stack slot.
+  bool isSpillInstruction(const MachineInstr &MI, MachineFunction *MF);
+
+  /// Decide if \p MI is a spill instruction and return true if it is. We use
+  /// 2 criteria to make this decision:
+  /// - Is this instruction a store to a spill slot?
+  /// - Is there a register operand that is both used and killed?
+  /// TODO: Store optimization can fold spills into other stores (including
+  /// other spills). We do not handle this yet (more than one memory operand).
+  bool isLocationSpill(const MachineInstr &MI, MachineFunction *MF,
+                       unsigned &Reg);
+
+  /// If a given instruction is identified as a restore, return the spill slot
+  /// and set \p Reg to the restored register.
+  Optional<SpillLoc> isRestoreInstruction(const MachineInstr &MI,
+                                          MachineFunction *MF, unsigned &Reg);
+
+  /// Given a spill instruction, extract the register and offset used to
+  /// address the spill slot in a target independent way.
+  SpillLoc extractSpillBaseRegAndOffset(const MachineInstr &MI);
+
+  /// Observe a single instruction while stepping through a block.
+  void process(MachineInstr &MI);
+
+  /// Examines whether \p MI is a DBG_VALUE and notifies trackers.
+  /// \returns true if MI was recognized and processed.
+  bool transferDebugValue(const MachineInstr &MI);
+
+  /// Examines whether \p MI is a copy instruction, and notifies trackers.
+  /// \returns true if MI was recognized and processed.
+  bool transferRegisterCopy(MachineInstr &MI);
+
+  /// Examines whether \p MI is a stack spill or restore instruction, and
+  /// notifies trackers. \returns true if MI was recognized and processed.
+  bool transferSpillOrRestoreInst(MachineInstr &MI);
+
+  /// Examines \p MI for any registers that it defines, and notifies trackers.
+  void transferRegisterDef(MachineInstr &MI);
+
+  /// Copy one location to the other, accounting for movement of subregisters
+  /// too.
+  void performCopy(Register Src, Register Dst);
+
+  void accumulateFragmentMap(MachineInstr &MI);
+
+  /// Step through the function, recording register definitions and movements
+  /// in an MLocTracker. Convert the observations into a per-block transfer
+  /// function in \p MLocTransfer, suitable for using with the machine value
+  /// location dataflow problem.
+  void produceMLocTransferFunction(
+      MachineFunction &MF, SmallVectorImpl<MLocTransferMap> &MLocTransfer,
+      unsigned MaxNumBlocks);
+
+  /// Solve the machine value location dataflow problem. Takes as input the
+  /// transfer functions in \p MLocTransfer. Writes the output live-in and
+  /// live-out arrays to the (initialized to zero) multidimensional arrays in
+  /// \p MInLocs and \p MOutLocs. The outer dimension is indexed by block
+  /// number, the inner by LocIdx.
+  void mlocDataflow(uint64_t **MInLocs, uint64_t **MOutLocs,
+                    SmallVectorImpl<MLocTransferMap> &MLocTransfer);
+
+  /// Perform a control flow join (lattice value meet) of the values in
+  /// machine locations at \p MBB. Follows the algorithm described in the
+  /// file-comment, reading live-outs of predecessors from \p OutLocs, the
+  /// current live ins from \p InLocs, and assigning the newly computed live
+  /// ins back into \p InLocs. \returns two bools -- the first indicates
+  /// whether a change was made, the second whether a lattice downgrade
+  /// occurred. If the latter is true, revisiting this block is necessary.
+  std::tuple<bool, bool>
+  mlocJoin(MachineBasicBlock &MBB,
+           SmallPtrSet<const MachineBasicBlock *, 16> &Visited,
+           uint64_t **OutLocs, uint64_t *InLocs);
+
+  /// Solve the variable value dataflow problem, for a single lexical scope.
+  /// Uses the algorithm from the file comment to resolve control flow joins,
+  /// although there are extra hacks, see vlocJoin. Reads the locations of
+  /// values from the \p MInLocs and \p MOutLocs arrays (see mlocDataflow)
+  /// and reads the variable values transfer function from \p AllTheVLocs.
+  /// Live-in and live-out variable values are stored locally, with the
+  /// live-ins permanently stored to \p Output once the fixedpoint is reached.
+  /// \p VarsWeCareAbout contains a collection of the variables in \p Scope
+  /// that we should be tracking.
+  /// \p AssignBlocks contains the set of blocks that aren't in \p Scope, but
+  /// which do contain DBG_VALUEs, which VarLocBasedImpl tracks locations
+  /// through.
+  void vlocDataflow(const LexicalScope *Scope, const DILocation *DILoc,
+                    const SmallSet<DebugVariable, 4> &VarsWeCareAbout,
+                    SmallPtrSetImpl<MachineBasicBlock *> &AssignBlocks,
+                    LiveInsT &Output, uint64_t **MOutLocs, uint64_t **MInLocs,
+                    SmallVectorImpl<VLocTracker> &AllTheVLocs);
+
+  /// Compute the live-ins to a block, considering control flow merges
+  /// according to the method in the file comment. Live out and live in
+  /// variable values are stored in \p VLOCOutLocs and \p VLOCInLocs. The
+  /// live-ins for \p MBB are computed and stored into \p VLOCInLocs.
+  /// \p InLocsT Output argument, storage for calculated live-ins.
+  /// \returns two bools -- the first indicates whether a change was made,
+  /// the second whether a lattice downgrade occurred. If the latter is true,
+  /// revisiting this block is necessary.
+  std::tuple<bool, bool>
+  vlocJoin(MachineBasicBlock &MBB, LiveIdxT &VLOCOutLocs,
+           LiveIdxT &VLOCInLocs,
+           SmallPtrSet<const MachineBasicBlock *, 8> *VLOCVisited,
+           unsigned BBNum, const SmallSet<DebugVariable, 4> &AllVars,
+           uint64_t **MOutLocs, uint64_t **MInLocs,
+           SmallPtrSet<const MachineBasicBlock *, 8> &InScopeBlocks,
+           SmallPtrSet<const MachineBasicBlock *, 8> &BlocksToExplore,
+           DenseMap<DebugVariable, DbgValue> &InLocsT);
+
+  /// Continue exploration of the variable-value lattice, as explained in the
+  /// file-level comment. \p OldLiveInLocation contains the current
+  /// exploration position, from which we need to descend further. \p Values
+  /// contains the set of live-in values, \p CurBlockRPONum the RPO number of
+  /// the current block, and \p CandidateLocations a set of locations that
+  /// should be considered as PHI locations, if we reach the bottom of the
+  /// lattice. \returns true if we should downgrade; the value we downgrade
+  /// to is the agreeing value number in a non-backedge predecessor.
+  bool vlocDowngradeLattice(const MachineBasicBlock &MBB,
+                            const DbgValue &OldLiveInLocation,
+                            const SmallVectorImpl<InValueT> &Values,
+                            unsigned CurBlockRPONum);
+
+  /// For the given block and live-outs feeding into it, try to find a
+  /// machine location where they all join. If a solution for all
+  /// predecessors can't be found, a location where all
+  /// non-backedge-predecessors join will be returned instead. While this
+  /// method finds a join location, this says nothing as to whether it should
+  /// be used.
+  /// \returns Pair of the value ID if found, and a bool that is true when
+  /// the correct value is available on all predecessor edges, or false if
+  /// it's only available for non-backedge predecessors.
+  std::tuple<Optional<ValueIDNum>, bool>
+  pickVPHILoc(MachineBasicBlock &MBB, const DebugVariable &Var,
+              const LiveIdxT &LiveOuts, uint64_t **MOutLocs,
+              uint64_t **MInLocs,
+              const SmallVectorImpl<MachineBasicBlock *> &BlockOrders);
+
+  /// Given the solutions to the two dataflow problems, machine value
+  /// locations in \p MInLocs and live-in variable values in \p SavedLiveIns,
+  /// runs the TransferTracker class over the function to produce live-in and
+  /// transfer DBG_VALUEs, then inserts them. Groups of DBG_VALUEs are
+  /// inserted in the order given by AllVarsNumbering -- this could be any
+  /// stable order, but right now "order of appearance in function, when
+  /// explored in RPO", so that we can compare explicitly against
+  /// VarLocBasedImpl.
+  void emitLocations(MachineFunction &MF, LiveInsT SavedLiveIns,
+                     uint64_t **MInLocs,
+                     DenseMap<DebugVariable, unsigned> &AllVarsNumbering);
+
+  /// Boilerplate computation of some initial sets, artificial blocks and
+  /// RPOT block ordering.
+  void initialSetup(MachineFunction &MF);
+
+  bool ExtendRanges(MachineFunction &MF, TargetPassConfig *TPC);
+
+public:
+  /// Default construct and initialize the pass.
+  InstrRefBasedLDV();
+
+  LLVM_DUMP_METHOD
+  void dump_mloc_transfer(const MLocTransferMap &mloc_transfer) const;
+
+  bool isCalleeSaved(LocIdx L) {
+    unsigned Reg = MTracker->LocIdxToLocID[L];
+    for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI)
+      if (CalleeSavedRegs.test(*RAI))
+        return true;
+    return false;
+  }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+//            Implementation
+//===----------------------------------------------------------------------===//
+
+/// Default construct and initialize the pass.
+InstrRefBasedLDV::InstrRefBasedLDV() {} + +//===----------------------------------------------------------------------===// +// Debug Range Extension Implementation +//===----------------------------------------------------------------------===// + +#ifndef NDEBUG +// Something to restore in the future. +// void InstrRefBasedLDV::printVarLocInMBB(..) +#endif + +SpillLoc +InstrRefBasedLDV::extractSpillBaseRegAndOffset(const MachineInstr &MI) { + assert(MI.hasOneMemOperand() && + "Spill instruction does not have exactly one memory operand?"); + auto MMOI = MI.memoperands_begin(); + const PseudoSourceValue *PVal = (*MMOI)->getPseudoValue(); + assert(PVal->kind() == PseudoSourceValue::FixedStack && + "Inconsistent memory operand in spill instruction"); + int FI = cast(PVal)->getFrameIndex(); + const MachineBasicBlock *MBB = MI.getParent(); + Register Reg; + int Offset = TFI->getFrameIndexReference(*MBB->getParent(), FI, Reg); + return {Reg, Offset}; +} + +/// End all previous ranges related to @MI and start a new range from @MI +/// if it is a DBG_VALUE instr. +bool InstrRefBasedLDV::transferDebugValue(const MachineInstr &MI) { + if (!MI.isDebugValue()) + return false; + + const DILocalVariable *Var = MI.getDebugVariable(); + const DIExpression *Expr = MI.getDebugExpression(); + const DILocation *DebugLoc = MI.getDebugLoc(); + const DILocation *InlinedAt = DebugLoc->getInlinedAt(); + assert(Var->isValidLocationForIntrinsic(DebugLoc) && + "Expected inlined-at fields to agree"); + + DebugVariable V(Var, Expr, InlinedAt); + + // If there are no instructions in this lexical scope, do no location tracking + // at all, this variable shouldn't get a legitimate location range. + auto *Scope = LS.findLexicalScope(MI.getDebugLoc().get()); + if (Scope == nullptr) + return true; // handled it; by doing nothing + + const MachineOperand &MO = MI.getOperand(0); + + // MLocTracker needs to know that this register is read, even if it's only + // read by a debug inst. + if (MO.isReg() && MO.getReg() != 0) + (void)MTracker->readReg(MO.getReg()); + + // If we're preparing for the second analysis (variables), the machine value + // locations are already solved, and we report this DBG_VALUE and the value + // it refers to to VLocTracker. + if (VTracker) { + if (MO.isReg()) { + // Should read LocNo==0 on $noreg. + ValueIDNum Undef = {0, 0, LocIdx(0)}; + ValueIDNum ID = (MO.getReg()) ? MTracker->readReg(MO.getReg()) : Undef; + VTracker->defVar(MI, ID); + } else if (MI.getOperand(0).isImm() || MI.getOperand(0).isFPImm() || + MI.getOperand(0).isCImm()) { + VTracker->defVar(MI, MI.getOperand(0)); + } + } + + // If performing final tracking of transfers, report this variable definition + // to the TransferTracker too. + if (TTracker) + TTracker->redefVar(MI); + return true; +} + +void InstrRefBasedLDV::transferRegisterDef(MachineInstr &MI) { + // Meta Instructions do not affect the debug liveness of any register they + // define. + if (MI.isImplicitDef()) { + // Except when there's an implicit def, and the location it's defining has + // no value number. The whole point of an implicit def is to announce that + // the register is live, without be specific about it's value. So define + // a value if there isn't one already. + ValueIDNum Num = MTracker->readReg(MI.getOperand(0).getReg()); + // Has a legitimate value -> ignore the implicit def. + if (Num.LocNo != 0) + return; + // Otherwise, def it here. 
+ } else if (MI.isMetaInstruction()) + return; + + MachineFunction *MF = MI.getMF(); + const TargetLowering *TLI = MF->getSubtarget().getTargetLowering(); + unsigned SP = TLI->getStackPointerRegisterToSaveRestore(); + + // Find the regs killed by MI, and find regmasks of preserved regs. + // Max out the number of statically allocated elements in `DeadRegs`, as this + // prevents fallback to std::set::count() operations. + SmallSet DeadRegs; + SmallVector RegMasks; + SmallVector RegMaskPtrs; + for (const MachineOperand &MO : MI.operands()) { + // Determine whether the operand is a register def. + if (MO.isReg() && MO.isDef() && MO.getReg() && + Register::isPhysicalRegister(MO.getReg()) && + !(MI.isCall() && MO.getReg() == SP)) { + // Remove ranges of all aliased registers. + for (MCRegAliasIterator RAI(MO.getReg(), TRI, true); RAI.isValid(); ++RAI) + // FIXME: Can we break out of this loop early if no insertion occurs? + DeadRegs.insert(*RAI); + } else if (MO.isRegMask()) { + RegMasks.push_back(MO.getRegMask()); + RegMaskPtrs.push_back(&MO); + } + } + + // Tell MLocTracker about all definitions, of regmasks and otherwise. + for (uint32_t DeadReg : DeadRegs) + MTracker->defReg(DeadReg, CurBB, CurInst); + + for (auto *MO : RegMaskPtrs) + MTracker->writeRegMask(MO, CurBB, CurInst); +} + +void InstrRefBasedLDV::performCopy(Register SrcRegNum, Register DstRegNum) { + ValueIDNum SrcValue = MTracker->readReg(SrcRegNum); + + MTracker->setReg(DstRegNum, SrcValue); + + // In all circumstances, re-def the super registers. It's definitely a new + // value now. This doesn't uniquely identify the composition of subregs, for + // example, two identical values in subregisters composed in different + // places would not get equal value numbers. + for (MCSuperRegIterator SRI(DstRegNum, TRI); SRI.isValid(); ++SRI) + MTracker->defReg(*SRI, CurBB, CurInst); + + // If we're emulating VarLocBasedImpl, just define all the subregisters. + // DBG_VALUEs of them will expect to be tracked from the DBG_VALUE, not + // through prior copies. + if (EmulateOldLDV) { + for (MCSubRegIndexIterator DRI(DstRegNum, TRI); DRI.isValid(); ++DRI) + MTracker->defReg(DRI.getSubReg(), CurBB, CurInst); + return; + } + + // Otherwise, actually copy subregisters from one location to another. + // XXX: in addition, any subregisters of DstRegNum that don't line up with + // the source register should be def'd. + for (MCSubRegIndexIterator SRI(SrcRegNum, TRI); SRI.isValid(); ++SRI) { + unsigned SrcSubReg = SRI.getSubReg(); + unsigned SubRegIdx = SRI.getSubRegIndex(); + unsigned DstSubReg = TRI->getSubReg(DstRegNum, SubRegIdx); + if (!DstSubReg) + continue; + + // Do copy. There are two matching subregisters, the source value should + // have been def'd when the super-reg was, the latter might not be tracked + // yet. + ValueIDNum CpyValue = SrcValue; + + // This will force SRcSubReg to be tracked, if it isn't yet. + (void)MTracker->readReg(SrcSubReg); + LocIdx SrcL = MTracker->getRegMLoc(SrcSubReg); + assert(SrcL); + (void)MTracker->readReg(DstSubReg); + LocIdx DstL = MTracker->getRegMLoc(DstSubReg); + assert(DstL); + (void)DstL; + CpyValue.LocNo = SrcL; + + MTracker->setReg(DstSubReg, CpyValue); + } +} + +bool InstrRefBasedLDV::isSpillInstruction(const MachineInstr &MI, + MachineFunction *MF) { + // TODO: Handle multiple stores folded into one. 
+  if (!MI.hasOneMemOperand())
+    return false;
+
+  if (!MI.getSpillSize(TII) && !MI.getFoldedSpillSize(TII))
+    return false; // This is not a spill instruction, since no valid size was
+                  // returned from either function.
+
+  return true;
+}
+
+bool InstrRefBasedLDV::isLocationSpill(const MachineInstr &MI,
+                                       MachineFunction *MF, unsigned &Reg) {
+  if (!isSpillInstruction(MI, MF))
+    return false;
+
+  // XXX FIXME: On x86, isStoreToStackSlotPostFE returns '1' instead of an
+  // actual register number.
+  if (ObserveAllStackops) {
+    int FI;
+    Reg = TII->isStoreToStackSlotPostFE(MI, FI);
+    return Reg != 0;
+  }
+
+  auto isKilledReg = [&](const MachineOperand MO, unsigned &Reg) {
+    if (!MO.isReg() || !MO.isUse()) {
+      Reg = 0;
+      return false;
+    }
+    Reg = MO.getReg();
+    return MO.isKill();
+  };
+
+  for (const MachineOperand &MO : MI.operands()) {
+    // In a spill instruction generated by the InlineSpiller the spilled
+    // register has its kill flag set.
+    if (isKilledReg(MO, Reg))
+      return true;
+    if (Reg != 0) {
+      // Check whether the next instruction kills the spilled register.
+      // FIXME: Current solution does not cover search for killed register in
+      // bundles and instructions further down the chain.
+      auto NextI = std::next(MI.getIterator());
+      // Skip if the next instruction is the basic block's end iterator.
+      if (MI.getParent()->end() == NextI)
+        continue;
+      unsigned RegNext;
+      for (const MachineOperand &MONext : NextI->operands()) {
+        // Return true if we came across the register from the
+        // previous spill instruction that is killed in NextI.
+        if (isKilledReg(MONext, RegNext) && RegNext == Reg)
+          return true;
+      }
+    }
+  }
+  // Return false if we didn't find a spilled register.
+  return false;
+}
+
+Optional<SpillLoc>
+InstrRefBasedLDV::isRestoreInstruction(const MachineInstr &MI,
+                                       MachineFunction *MF, unsigned &Reg) {
+  if (!MI.hasOneMemOperand())
+    return None;
+
+  // FIXME: Handle folded restore instructions with more than one memory
+  // operand.
+  if (MI.getRestoreSize(TII)) {
+    Reg = MI.getOperand(0).getReg();
+    return extractSpillBaseRegAndOffset(MI);
+  }
+  return None;
+}
+
+bool InstrRefBasedLDV::transferSpillOrRestoreInst(MachineInstr &MI) {
+  // XXX -- it's too difficult to implement VarLocBasedImpl's stack location
+  // limitations under the new model. Therefore, when comparing them, compare
+  // versions that don't attempt spills or restores at all.
+  if (EmulateOldLDV)
+    return false;
+
+  MachineFunction *MF = MI.getMF();
+  unsigned Reg;
+  Optional<SpillLoc> Loc;
+
+  LLVM_DEBUG(dbgs() << "Examining instruction: "; MI.dump(););
+
+  // First, if there are any DBG_VALUEs pointing at a spill slot that is
+  // written to, terminate that variable location. The value in memory
+  // will have changed. DbgEntityHistoryCalculator doesn't try to detect this.
+  if (isSpillInstruction(MI, MF)) {
+    Loc = extractSpillBaseRegAndOffset(MI);
+
+    if (TTracker) {
+      LocIdx MLoc = MTracker->getSpillMLoc(*Loc);
+      if (MLoc != 0)
+        TTracker->clobberMloc(MLoc, MI.getIterator());
+    }
+  }
+
+  // Try to recognise spill and restore instructions that may transfer a value.
+  if (isLocationSpill(MI, MF, Reg)) {
+    Loc = extractSpillBaseRegAndOffset(MI);
+    auto ValueID = MTracker->readReg(Reg);
+
+    // If the location is empty, produce a PHI, signifying that it's the
+    // live-in value.
+    if (ValueID.LocNo == 0)
+      ValueID = {CurBB, 0, MTracker->getRegMLoc(Reg)};
+
+    MTracker->setSpill(*Loc, ValueID);
+    assert(MTracker->getSpillMLoc(*Loc) != 0);
+
+    // Tell TransferTracker about this spill, produce DBG_VALUEs for it.
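+    // e.g. a variable whose value was being tracked in $rdi continues to be
+    // tracked in the stack slot after the store: the value number does not
+    // change, only the location holding it does.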
+    if (TTracker)
+      TTracker->transferMlocs(MTracker->getRegMLoc(Reg),
+                              MTracker->getSpillMLoc(*Loc), MI.getIterator());
+
+    // VarLocBasedImpl would, at this point, stop tracking the source
+    // register of the store.
+    if (EmulateOldLDV) {
+      for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI)
+        MTracker->defReg(*RAI, CurBB, CurInst);
+    }
+  } else {
+    if (!(Loc = isRestoreInstruction(MI, MF, Reg)))
+      return false;
+
+    // Is there a value to be restored?
+    auto ValueID = MTracker->readSpill(*Loc);
+    if (ValueID.LocNo != 0) {
+      // XXX -- can we recover sub-registers of this value? Until we can, first
+      // overwrite all defs of the register being restored to.
+      for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI)
+        MTracker->defReg(*RAI, CurBB, CurInst);
+
+      // Now override the reg we're restoring to.
+      MTracker->setReg(Reg, ValueID);
+      assert(MTracker->getSpillMLoc(*Loc) != 0);
+
+      // Report this restore to the transfer tracker too.
+      if (TTracker)
+        TTracker->transferMlocs(MTracker->getSpillMLoc(*Loc),
+                                MTracker->getRegMLoc(Reg), MI.getIterator());
+    } else {
+      // There isn't anything in the location; it's not clear whether this is
+      // a code path that still runs. Def this register anyway, just in case.
+      for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI)
+        MTracker->defReg(*RAI, CurBB, CurInst);
+
+      // Set the restored value to be a machine PHI number, signifying that
+      // it's whatever the spill's live-in value is in this block.
+      LocIdx L = MTracker->getSpillMLoc(*Loc);
+      ValueID = {CurBB, 0, L};
+      MTracker->setReg(Reg, ValueID);
+    }
+  }
+  return true;
+}
+
+bool InstrRefBasedLDV::transferRegisterCopy(MachineInstr &MI) {
+  auto DestSrc = TII->isCopyInstr(MI);
+  if (!DestSrc)
+    return false;
+
+  const MachineOperand *DestRegOp = DestSrc->Destination;
+  const MachineOperand *SrcRegOp = DestSrc->Source;
+
+  auto isCalleeSavedReg = [&](unsigned Reg) {
+    for (MCRegAliasIterator RAI(Reg, TRI, true); RAI.isValid(); ++RAI)
+      if (CalleeSavedRegs.test(*RAI))
+        return true;
+    return false;
+  };
+
+  Register SrcReg = SrcRegOp->getReg();
+  Register DestReg = DestRegOp->getReg();
+
+  // Ignore identity copies. Yep, these make it as far as LiveDebugValues.
+  if (SrcReg == DestReg)
+    return true;
+
+  // For emulating VarLocBasedImpl:
+  // We want to recognize instructions where the destination register is a
+  // callee-saved register. If a register that could be clobbered by the call
+  // were used instead, there would be a great chance that it is going to be
+  // clobbered soon. It is more likely that the previous register, which is
+  // callee-saved, is going to stay unclobbered longer, even if it is killed.
+  //
+  // For InstrRefBasedImpl, we can track multiple locations per value, so
+  // ignore this condition.
+  if (EmulateOldLDV && !isCalleeSavedReg(DestReg))
+    return false;
+
+  // VarLocBasedImpl only followed killing copies.
+  if (EmulateOldLDV && !SrcRegOp->isKill())
+    return false;
+
+  // Copy MTracker info, including subregs if available.
+  InstrRefBasedLDV::performCopy(SrcReg, DestReg);
+
+  // Only produce a transfer of DBG_VALUE within a block where old LDV
+  // would have. We might make use of the additional value tracking in some
+  // other way, later.
+  if (TTracker && isCalleeSavedReg(DestReg) && SrcRegOp->isKill())
+    TTracker->transferMlocs(MTracker->getRegMLoc(SrcReg),
+                            MTracker->getRegMLoc(DestReg), MI.getIterator());
+
+  // VarLocBasedImpl would quit tracking the old location after copying.
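+  // For example, given "$rbp = COPY killed $rax" with a variable's value in
+  // $rax: the value number becomes available in both registers, but only the
+  // copy into the callee-saved register produces a DBG_VALUE transfer,
+  // matching VarLocBasedImpl's behaviour.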
+  if (EmulateOldLDV && SrcReg != DestReg)
+    MTracker->defReg(SrcReg, CurBB, CurInst);
+
+  return true;
+}
+
+/// Accumulate a mapping between each DILocalVariable fragment and other
+/// fragments of that DILocalVariable which overlap. This reduces work during
+/// the data-flow stage from "Find any overlapping fragments" to "Check if the
+/// known-to-overlap fragments are present".
+/// \param MI A previously unprocessed DEBUG_VALUE instruction to analyze for
+/// fragment usage.
+void InstrRefBasedLDV::accumulateFragmentMap(MachineInstr &MI) {
+  DebugVariable MIVar(MI.getDebugVariable(), MI.getDebugExpression(),
+                      MI.getDebugLoc()->getInlinedAt());
+  FragmentInfo ThisFragment = MIVar.getFragmentOrDefault();
+
+  // If this is the first sighting of this variable, then we are guaranteed
+  // there are currently no overlapping fragments either. Initialize the set
+  // of seen fragments, record no overlaps for the current one, and return.
+  auto SeenIt = SeenFragments.find(MIVar.getVariable());
+  if (SeenIt == SeenFragments.end()) {
+    SmallSet<FragmentInfo, 4> OneFragment;
+    OneFragment.insert(ThisFragment);
+    SeenFragments.insert({MIVar.getVariable(), OneFragment});
+
+    OverlapFragments.insert({{MIVar.getVariable(), ThisFragment}, {}});
+    return;
+  }
+
+  // If this particular Variable/Fragment pair already exists in the overlap
+  // map, it has already been accounted for.
+  auto IsInOLapMap =
+      OverlapFragments.insert({{MIVar.getVariable(), ThisFragment}, {}});
+  if (!IsInOLapMap.second)
+    return;
+
+  auto &ThisFragmentsOverlaps = IsInOLapMap.first->second;
+  auto &AllSeenFragments = SeenIt->second;
+
+  // Otherwise, examine all other seen fragments for this variable, with "this"
+  // fragment being a previously unseen fragment. Record any pair of
+  // overlapping fragments.
+  for (auto &ASeenFragment : AllSeenFragments) {
+    // Does this previously seen fragment overlap?
+    if (DIExpression::fragmentsOverlap(ThisFragment, ASeenFragment)) {
+      // Yes: Mark the current fragment as being overlapped.
+      ThisFragmentsOverlaps.push_back(ASeenFragment);
+      // Mark the previously seen fragment as being overlapped by the current
+      // one.
+      auto ASeenFragmentsOverlaps =
+          OverlapFragments.find({MIVar.getVariable(), ASeenFragment});
+      assert(ASeenFragmentsOverlaps != OverlapFragments.end() &&
+             "Previously seen var fragment has no vector of overlaps");
+      ASeenFragmentsOverlaps->second.push_back(ThisFragment);
+    }
+  }
+
+  AllSeenFragments.insert(ThisFragment);
+}
+
+void InstrRefBasedLDV::process(MachineInstr &MI) {
+  // Try to interpret an MI as a debug or transfer instruction. Only if it's
+  // none of these should we interpret its register defs as new value
+  // definitions.
+  if (transferDebugValue(MI))
+    return;
+  if (transferRegisterCopy(MI))
+    return;
+  if (transferSpillOrRestoreInst(MI))
+    return;
+  transferRegisterDef(MI);
+}
+
+void InstrRefBasedLDV::produceMLocTransferFunction(
+    MachineFunction &MF, SmallVectorImpl<MLocTransferMap> &MLocTransfer,
+    unsigned MaxNumBlocks) {
+  // Because we try to optimize around register mask operands by ignoring regs
+  // that aren't currently tracked, we set up something ugly for later: RegMask
+  // operands that are seen earlier than the first use of a register still need
+  // to clobber that register in the transfer function. But this information
+  // isn't actively recorded. Instead, we track each RegMask used in each
+  // block, and accumulate the clobbered but untracked registers in each block
+  // into the following bitvector. Later, if new values are tracked, we can add
+  // appropriate clobbers.
+  SmallVector<BitVector, 32> BlockMasks;
+  BlockMasks.resize(MaxNumBlocks);
+
+  // Reserve one bit per register for the masks described above.
+  unsigned BVWords = MachineOperand::getRegMaskSize(TRI->getNumRegs());
+  for (auto &BV : BlockMasks)
+    BV.resize(TRI->getNumRegs(), true);
+
+  // Step through all instructions and inhale the transfer function.
+  for (auto &MBB : MF) {
+    // Object fields that are read by trackers to know where we are in the
+    // function.
+    CurBB = MBB.getNumber();
+    CurInst = 1;
+
+    // Set all machine locations to a PHI value. For transfer function
+    // production only, this signifies the live-in value to the block.
+    MTracker->reset();
+    MTracker->setMPhis(CurBB);
+
+    // Step through each instruction in this block.
+    for (auto &MI : MBB) {
+      process(MI);
+      // Also accumulate fragment map.
+      if (MI.isDebugValue())
+        accumulateFragmentMap(MI);
+      ++CurInst;
+    }
+
+    // Produce the transfer function, a map of machine location to new value.
+    // If any machine location has the live-in phi value from the start of the
+    // block, it's live-through and doesn't need recording in the transfer
+    // function.
+    for (unsigned IdxNum = 1; IdxNum < MTracker->getNumLocs(); ++IdxNum) {
+      LocIdx Idx = LocIdx(IdxNum);
+      ValueIDNum P = MTracker->getNumAtPos(Idx);
+      if (P.InstNo == 0 && P.LocNo == Idx)
+        continue;
+
+      MLocTransfer[CurBB][Idx] = P;
+    }
+
+    // Accumulate any bitmask operands into the clobbered reg mask for this
+    // block.
+    for (auto &P : MTracker->Masks) {
+      BlockMasks[CurBB].clearBitsNotInMask(P.first->getRegMask(), BVWords);
+    }
+  }
+
+  // Compute a bitvector of all the registers that are tracked in this
+  // function.
+  const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
+  unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
+  BitVector UsedRegs(TRI->getNumRegs());
+  for (auto &P : MTracker->LocIdxToLocID) {
+    if (P.first == 0 || P.second >= TRI->getNumRegs() || P.second == SP)
+      continue;
+    UsedRegs.set(P.second);
+  }
+
+  // Check that any regmask-clobber of a register that gets tracked is not
+  // live-through in the transfer function. It needs to be clobbered at the
+  // very least.
+  for (unsigned int I = 0; I < MaxNumBlocks; ++I) {
+    BitVector &BV = BlockMasks[I];
+    BV.flip();
+    BV &= UsedRegs;
+    // This produces all the bits that we clobber, but also use. Check that
+    // they're all clobbered or at least set in the designated transfer
+    // elem.
+    for (unsigned Bit : BV.set_bits()) {
+      unsigned ID = MTracker->getLocID(Bit, false);
+      LocIdx Idx = MTracker->LocIDToLocIdx[ID];
+      assert(Idx != 0);
+      ValueIDNum &ValueID = MLocTransfer[I][Idx];
+      if (ValueID.BlockNo == I && ValueID.InstNo == 0)
+        // It was left as live-through. Set it to clobbered.
+        ValueID = ValueIDNum{0, 0, LocIdx(0)};
+    }
+  }
+}
+
+std::tuple<bool, bool>
+InstrRefBasedLDV::mlocJoin(MachineBasicBlock &MBB,
+                           SmallPtrSet<const MachineBasicBlock *, 16> &Visited,
+                           uint64_t **OutLocs, uint64_t *InLocs) {
+  LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getNumber() << "\n");
+  bool Changed = false;
+  bool DowngradeOccurred = false;
+
+  // Collect predecessors that have been visited. Anything that hasn't been
+  // visited yet is a backedge on the first iteration, and the meet of its
+  // lattice value for all locations will be unaffected.
+  SmallVector<MachineBasicBlock *, 8> BlockOrders;
+  for (auto Pred : MBB.predecessors()) {
+    if (Visited.count(Pred)) {
+      BlockOrders.push_back(Pred);
+    }
+  }
+
+  // Visit predecessors in RPOT order.
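+  // A small worked example of the join below: if visited predecessors bb1
+  // and bb2 both have value {bb0, inst 4} live-out in some location, that
+  // value becomes the live-in here. If they disagree and neither edge is a
+  // backedge, the live-in becomes the PHI value {this block, inst 0}.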
+  auto Cmp = [&](const MachineBasicBlock *A, const MachineBasicBlock *B) {
+    return BBToOrder.find(A)->second < BBToOrder.find(B)->second;
+  };
+  llvm::sort(BlockOrders.begin(), BlockOrders.end(), Cmp);
+
+  // Skip entry block.
+  if (BlockOrders.size() == 0)
+    return std::tuple<bool, bool>(false, false);
+
+  // Step through all machine locations, then look at each predecessor and
+  // detect disagreements.
+  unsigned ThisBlockRPO = BBToOrder.find(&MBB)->second;
+  for (unsigned Idx = 1; Idx < MTracker->getNumLocs(); ++Idx) {
+    // Pick out the first predecessor's live-out value for this location. It's
+    // guaranteed not to be a backedge, as we order by RPO.
+    uint64_t BaseVal = OutLocs[BlockOrders[0]->getNumber()][Idx];
+
+    // Some flags for whether there's a disagreement, and whether it's a
+    // disagreement with a backedge or not.
+    bool Disagree = false;
+    bool NonBackEdgeDisagree = false;
+
+    // Loop around everything that wasn't 'base'.
+    for (unsigned int I = 1; I < BlockOrders.size(); ++I) {
+      auto *MBB = BlockOrders[I];
+      if (BaseVal != OutLocs[MBB->getNumber()][Idx]) {
+        // Live-out of a predecessor disagrees with the first predecessor.
+        Disagree = true;
+
+        // Test whether it's a disagreement in the backedges or not.
+        if (BBToOrder.find(MBB)->second < ThisBlockRPO) // might be self b/e
+          NonBackEdgeDisagree = true;
+      }
+    }
+
+    bool OverRide = false;
+    if (Disagree && !NonBackEdgeDisagree &&
+        ValueIDNum::fromU64(InLocs[Idx]).LocNo != 0) {
+      // Only the backedges disagree, and we previously agreed on some value
+      // because we set the live-in to be nonzero. Consider demoting the
+      // live-in lattice value, as per the file level comment. The value we
+      // consider demoting to is the value that the non-backedge predecessors
+      // agree on. The order of values is that non-PHIs are \top, a PHI at
+      // this block is \bot, and phis between the two are ordered by their
+      // RPO number. If there's no agreement, or we've already demoted to this
+      // PHI value before, replace with a PHI value at this block.
+
+      // Calculate order numbers: zero means normal def, nonzero means RPO
+      // number.
+      ValueIDNum BaseID = ValueIDNum::fromU64(BaseVal);
+      unsigned BaseBlockRPONum = BBNumToRPO[BaseID.BlockNo] + 1;
+      if (BaseID.InstNo != 0)
+        BaseBlockRPONum = 0;
+
+      ValueIDNum InLocID = ValueIDNum::fromU64(InLocs[Idx]);
+      unsigned InLocRPONum = BBNumToRPO[InLocID.BlockNo] + 1;
+      if (InLocID.InstNo != 0)
+        InLocRPONum = 0;
+
+      // Should we ignore the disagreeing backedges, and override with the
+      // value the other predecessors agree on (in "base")?
+      unsigned ThisBlockRPONum = BBNumToRPO[MBB.getNumber()] + 1;
+      if (BaseBlockRPONum > InLocRPONum && BaseBlockRPONum < ThisBlockRPONum) {
+        // Override.
+        OverRide = true;
+        DowngradeOccurred = true;
+      }
+    }
+    // else: if we disagree in the non-backedges, then this is definitely
+    // a control flow merge where different values merge. Make it a PHI.
+
+    // Generate a phi...
+    ValueIDNum PHI = {(uint64_t)MBB.getNumber(), 0, LocIdx(Idx)};
+    uint64_t NewVal = (Disagree && !OverRide) ? PHI.asU64() : BaseVal;
+    if (InLocs[Idx] != NewVal) {
+      Changed |= true;
+      InLocs[Idx] = NewVal;
+    }
+  }
+
+  // FIXME: reimplement the NumInserted and NumRemoved statistics.
+  return std::tuple<bool, bool>(Changed, DowngradeOccurred);
+}
+
+void InstrRefBasedLDV::mlocDataflow(
+    uint64_t **MInLocs, uint64_t **MOutLocs,
+    SmallVectorImpl<MLocTransferMap> &MLocTransfer) {
+  std::priority_queue<unsigned int, std::vector<unsigned int>,
+                      std::greater<unsigned int>>
+      Worklist, Pending;
+
+  // We track what is on the current and pending worklist to avoid inserting
+  // the same thing twice. We could avoid this with a custom priority queue,
+  // but this is probably not worth it.
+  SmallPtrSet<MachineBasicBlock *, 16> OnPending, OnWorklist;
+
+  // Initialize worklist with every block to be visited.
+  for (unsigned int I = 0; I < BBToOrder.size(); ++I) {
+    Worklist.push(I);
+    OnWorklist.insert(OrderToBB[I]);
+  }
+
+  MTracker->reset();
+
+  // Set inlocs for entry block -- each as a PHI at the entry block. Represents
+  // the incoming value to the function.
+  MTracker->setMPhis(0);
+  for (unsigned Idx = 1; Idx < MTracker->getNumLocs(); ++Idx) {
+    ValueIDNum Val = MTracker->getNumAtPos(LocIdx(Idx));
+    uint64_t ID = Val.asU64();
+    MInLocs[0][Idx] = ID;
+  }
+
+  SmallPtrSet<const MachineBasicBlock *, 16> Visited;
+  while (!Worklist.empty() || !Pending.empty()) {
+    // Vector for storing the evaluated block transfer function.
+    SmallVector<std::pair<LocIdx, ValueIDNum>, 32> ToRemap;
+
+    while (!Worklist.empty()) {
+      MachineBasicBlock *MBB = OrderToBB[Worklist.top()];
+      CurBB = MBB->getNumber();
+      Worklist.pop();
+
+      // Join the values in all predecessor blocks.
+      bool InLocsChanged, DowngradeOccurred;
+      std::tie(InLocsChanged, DowngradeOccurred) =
+          mlocJoin(*MBB, Visited, MOutLocs, MInLocs[CurBB]);
+      InLocsChanged |= Visited.insert(MBB).second;
+
+      // If a downgrade occurred, book us in for re-examination on the next
+      // iteration.
+      if (DowngradeOccurred && OnPending.insert(MBB).second)
+        Pending.push(BBToOrder[MBB]);
+
+      // Don't examine the transfer function if we've visited this block at
+      // least once, and the inlocs haven't changed.
+      if (!InLocsChanged)
+        continue;
+
+      // Load the current set of live-ins into MLocTracker.
+      MTracker->loadFromArray(MInLocs[CurBB], CurBB);
+
+      // Each element of the transfer function can be a new def, or a read of
+      // a live-in value. Evaluate each element, and store to "ToRemap".
+      ToRemap.clear();
+      for (auto &P : MLocTransfer[CurBB]) {
+        ValueIDNum NewID = {0, 0, LocIdx(0)};
+        if (P.second.BlockNo == CurBB && P.second.InstNo == 0) {
+          // This is a movement of whatever was live in. Read it.
+          NewID = MTracker->getNumAtPos(P.second.LocNo);
+        } else {
+          // It's a def. Just set it.
+          assert(P.second.BlockNo == CurBB || P.second.LocNo == 0);
+          NewID = P.second;
+        }
+        ToRemap.push_back(std::make_pair(P.first, NewID));
+      }
+
+      // Commit the transfer function changes into the mloc tracker, which
+      // transforms the contents of the MLocTracker into the live-outs.
+      for (auto &P : ToRemap)
+        MTracker->setMLoc(P.first, P.second);
+
+      // Now copy out-locs from the mloc tracker into the out-loc vector,
+      // checking whether changes have occurred. These changes can have come
+      // from both the transfer function, and mlocJoin.
+      bool OLChanged = false;
+      for (unsigned Idx = 1; Idx < MTracker->getNumLocs(); ++Idx) {
+        uint64_t ID = MTracker->getNumAtPos(LocIdx(Idx)).asU64();
+        OLChanged |= MOutLocs[CurBB][Idx] != ID;
+        MOutLocs[CurBB][Idx] = ID;
+      }
+
+      MTracker->reset();
+
+      // No need to examine successors again if out-locs didn't change.
+      if (!OLChanged)
+        continue;
+
+      // All successors should be visited: put any back-edges on the pending
+      // list for the next dataflow iteration, and any other successors to be
+      // visited this iteration, if they're not going to be already.
+      for (auto s : MBB->successors()) {
+        // Does branching to this successor represent a back-edge?
+        if (BBToOrder[s] > BBToOrder[MBB]) {
+          // No: visit it during this dataflow iteration.
+          if (OnWorklist.insert(s).second)
+            Worklist.push(BBToOrder[s]);
+        } else {
+          // Yes: visit it on the next iteration.
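+          // For example, in a loop bb1 -> bb2 -> bb1, examining bb2 books
+          // bb1 onto Pending: the backedge target is re-joined on the next
+          // dataflow iteration, once its other inputs have stabilised.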
+          if (OnPending.insert(s).second)
+            Pending.push(BBToOrder[s]);
+        }
+      }
+    }
+
+    Worklist.swap(Pending);
+    std::swap(OnPending, OnWorklist);
+    OnPending.clear();
+    // At this point, pending must be empty, since it was just the empty
+    // worklist.
+    assert(Pending.empty() && "Pending should be empty");
+  }
+
+  // Once all the live-ins don't change on mlocJoin(), we've reached a fixed
+  // point.
+}
+
+bool InstrRefBasedLDV::vlocDowngradeLattice(
+    const MachineBasicBlock &MBB, const DbgValue &OldLiveInLocation,
+    const SmallVectorImpl<InValueT> &Values, unsigned CurBlockRPONum) {
+  // Ranking value preference: see the file level comment, the highest rank is
+  // a plain def, followed by PHI values in reverse post-order. Numerically,
+  // we assign all defs the rank '0', all PHIs their block's RPO number plus
+  // one, and consider the lowest value the highest ranked.
+  int OldLiveInRank = BBNumToRPO[OldLiveInLocation.ID.BlockNo] + 1;
+  if (OldLiveInLocation.ID.InstNo != 0)
+    OldLiveInRank = 0;
+
+  // Allow any unresolvable conflict to be over-ridden.
+  if (OldLiveInLocation.Kind == DbgValue::NoVal) {
+    // Although if it was an unresolvable conflict from _this_ block, then
+    // all other seeking of downgrades and PHIs must have failed beforehand.
+    if (OldLiveInLocation.ID.BlockNo == MBB.getNumber())
+      return false;
+    OldLiveInRank = INT_MIN;
+  }
+
+  auto &InValue = *Values[0].second;
+
+  if (InValue.Kind == DbgValue::Const || InValue.Kind == DbgValue::NoVal)
+    return false;
+
+  unsigned ThisRPO = BBNumToRPO[InValue.ID.BlockNo];
+  int ThisRank = ThisRPO + 1;
+  if (InValue.ID.InstNo != 0)
+    ThisRank = 0;
+
+  // Too far down the lattice?
+  if (ThisRPO >= CurBlockRPONum)
+    return false;
+
+  // Higher in the lattice than what we've already explored?
+  if (ThisRank <= OldLiveInRank)
+    return false;
+
+  return true;
+}
+
+std::tuple<Optional<ValueIDNum>, bool> InstrRefBasedLDV::pickVPHILoc(
+    MachineBasicBlock &MBB, const DebugVariable &Var, const LiveIdxT &LiveOuts,
+    uint64_t **MOutLocs, uint64_t **MInLocs,
+    const SmallVectorImpl<MachineBasicBlock *> &BlockOrders) {
+  // Collect a set of locations from each predecessor where its live-out value
+  // can be found.
+  SmallVector<SmallVector<LocIdx, 4>, 8> Locs;
+  unsigned NumLocs = MTracker->getNumLocs();
+  unsigned BackEdgesStart = 0;
+
+  for (auto p : BlockOrders) {
+    // Pick out where backedges start in the list of predecessors. Relies on
+    // BlockOrders being sorted by RPO.
+    if (BBToOrder[p] < BBToOrder[&MBB])
+      ++BackEdgesStart;
+
+    // For each predecessor, create a new set of locations.
+    Locs.resize(Locs.size() + 1);
+    unsigned ThisBBNum = p->getNumber();
+    auto LiveOutMap = LiveOuts.find(p);
+    if (LiveOutMap == LiveOuts.end())
+      // This predecessor isn't in scope, it must have no live-in/live-out
+      // locations.
+      continue;
+
+    auto It = LiveOutMap->second->find(Var);
+    if (It == LiveOutMap->second->end())
+      // There's no value recorded for this variable in this predecessor,
+      // leave an empty set of locations.
+      continue;
+
+    const DbgValue &OutVal = It->second;
+    ValueIDNum ValToLookFor = {0, 0, LocIdx(0)};
+
+    if (OutVal.Kind == DbgValue::Const || OutVal.Kind == DbgValue::NoVal) {
+      // Consts and no-values cannot have locations we can join on.
+      continue;
+    } else if (OutVal.Kind == DbgValue::Proposed) {
+      // Treat proposed values as any other value, and seek a location.
+      ValToLookFor = OutVal.ID;
+    } else {
+      assert(OutVal.Kind == DbgValue::Def);
+      ValToLookFor = OutVal.ID;
+    }
+
+    // Search the live-outs of the predecessor for the specified value.
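+    // e.g. if the sought value {bb1, inst 7} is live-out of this predecessor
+    // in both $rbx and a spill slot, both locations are collected; the
+    // intersection across all predecessors below picks one common to each.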
+    for (unsigned int I = 0; I < NumLocs; ++I) {
+      if (MOutLocs[ThisBBNum][I] == ValToLookFor.asU64())
+        Locs.back().push_back(LocIdx(I));
+    }
+  }
+
+  // If there were no locations at all, return an empty result.
+  if (Locs.empty())
+    return {None, false};
+
+  // Lambda for seeking a common location within a range of location-sets.
+  typedef SmallVector<SmallVector<LocIdx, 4>, 8>::iterator LocsIt;
+  auto SeekLocation =
+      [&Locs](llvm::iterator_range<LocsIt> SearchRange) -> Optional<LocIdx> {
+    // Starting with the first set of locations, take the intersection with
+    // subsequent sets.
+    SmallVector<LocIdx, 4> base = Locs[0];
+    for (auto &S : SearchRange) {
+      SmallVector<LocIdx, 4> new_base;
+      std::set_intersection(base.begin(), base.end(), S.begin(), S.end(),
+                            std::inserter(new_base, new_base.begin()));
+      base = new_base;
+    }
+    if (base.empty())
+      return None;
+
+    // We now have a set of LocIdxes that contain the right output value in
+    // each of the predecessors. Pick the lowest; if there's a register loc,
+    // that'll be it.
+    return *base.begin();
+  };
+
+  // Search for a common location for all predecessors. If we can't, then fall
+  // back to only finding a common location between non-backedge predecessors.
+  bool ValidForAllLocs = true;
+  auto TheLoc = SeekLocation(Locs);
+  if (!TheLoc) {
+    ValidForAllLocs = false;
+    TheLoc =
+        SeekLocation(make_range(Locs.begin(), Locs.begin() + BackEdgesStart));
+  }
+
+  if (!TheLoc)
+    return {None, false};
+
+  // Return a PHI-value-number for the found location.
+  LocIdx L = *TheLoc;
+  ValueIDNum PHIVal = {(unsigned)MBB.getNumber(), 0, L};
+  return {PHIVal, ValidForAllLocs};
+}
+
+std::tuple<bool, bool> InstrRefBasedLDV::vlocJoin(
+    MachineBasicBlock &MBB, LiveIdxT &VLOCOutLocs, LiveIdxT &VLOCInLocs,
+    SmallPtrSet<MachineBasicBlock *, 16> *VLOCVisited, unsigned BBNum,
+    const SmallSet<DebugVariable, 4> &AllVars, uint64_t **MOutLocs,
+    uint64_t **MInLocs,
+    SmallPtrSet<const MachineBasicBlock *, 8> &InScopeBlocks,
+    SmallPtrSet<const MachineBasicBlock *, 8> &BlocksToExplore,
+    DenseMap<DebugVariable, DbgValue> &InLocsT) {
+  bool DowngradeOccurred = false;
+
+  // To emulate VarLocBasedImpl, process this block if it's not in scope but
+  // _does_ assign a variable value. No live-ins for this scope are transferred
+  // in though, so we can return immediately.
+  if (InScopeBlocks.count(&MBB) == 0 && !ArtificialBlocks.count(&MBB)) {
+    if (VLOCVisited)
+      return std::tuple<bool, bool>(true, false);
+    return std::tuple<bool, bool>(false, false);
+  }
+
+  LLVM_DEBUG(dbgs() << "join MBB: " << MBB.getNumber() << "\n");
+  bool Changed = false;
+
+  // Find any live-ins computed in a prior iteration.
+  auto ILSIt = VLOCInLocs.find(&MBB);
+  assert(ILSIt != VLOCInLocs.end());
+  auto &ILS = *ILSIt->second;
+
+  // Order predecessors by RPO, to explore them in that order.
+  SmallVector<MachineBasicBlock *, 8> BlockOrders;
+  for (auto p : MBB.predecessors())
+    BlockOrders.push_back(p);
+
+  auto Cmp = [&](MachineBasicBlock *A, MachineBasicBlock *B) {
+    return BBToOrder[A] < BBToOrder[B];
+  };
+
+  llvm::sort(BlockOrders.begin(), BlockOrders.end(), Cmp);
+
+  unsigned CurBlockRPONum = BBToOrder[&MBB];
+
+  // Force a re-visit of loop heads in the first dataflow iteration.
+  // FIXME: if we could "propose" Const values, this wouldn't be needed,
+  // because they'd need to be confirmed before being emitted.
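+  // On the first trip, values flowing around a loop's backedge are unknown,
+  // so a loop head is booked in for a second look once the loop body has
+  // produced live-out values.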
+  if (!BlockOrders.empty() &&
+      BBToOrder[BlockOrders[BlockOrders.size() - 1]] >= CurBlockRPONum &&
+      VLOCVisited)
+    DowngradeOccurred = true;
+
+  auto ConfirmValue = [&InLocsT](const DebugVariable &DV, DbgValue VR) {
+    auto Result = InLocsT.insert(std::make_pair(DV, VR));
+    (void)Result;
+    assert(Result.second);
+  };
+
+  auto ConfirmEmptyValue = [&ConfirmValue, &MBB](
+                               const DebugVariable &Var,
+                               const DbgValueProperties &Properties) {
+    unsigned BlockNo = MBB.getNumber();
+    ValueIDNum tmp = {BlockNo, 0, LocIdx(0)};
+    DbgValue NoLocPHIVal = {{tmp}, Properties, DbgValue::NoVal};
+
+    ConfirmValue(Var, NoLocPHIVal);
+  };
+
+  // Attempt to join the values for each variable.
+  for (auto &Var : AllVars) {
+    // Collect all the DbgValues for this variable.
+    SmallVector<InValueT, 8> Values;
+    bool Bail = false;
+    unsigned BackEdgesStart = 0;
+    for (auto p : BlockOrders) {
+      // If the predecessor isn't in scope / to be explored, we'll never be
+      // able to join any locations.
+      if (BlocksToExplore.find(p) == BlocksToExplore.end()) {
+        Bail = true;
+        break;
+      }
+
+      // Don't attempt to handle unvisited predecessors: they're implicitly
+      // "unknown"s in the lattice.
+      if (VLOCVisited && !VLOCVisited->count(p))
+        continue;
+
+      // If the predecessor's OutLocs is absent, there's not much we can do.
+      auto OL = VLOCOutLocs.find(p);
+      if (OL == VLOCOutLocs.end()) {
+        Bail = true;
+        break;
+      }
+
+      // No live-out value for this predecessor also means we can't produce
+      // a joined value.
+      auto VIt = OL->second->find(Var);
+      if (VIt == OL->second->end()) {
+        Bail = true;
+        break;
+      }
+
+      // Keep track of where back-edges begin in the Values vector. Relies on
+      // BlockOrders being sorted by RPO.
+      unsigned ThisBBRPONum = BBToOrder[p];
+      if (ThisBBRPONum < CurBlockRPONum)
+        ++BackEdgesStart;
+
+      Values.push_back(std::make_pair(p, &VIt->second));
+    }
+
+    // If there were no values, or one of the predecessors couldn't have a
+    // value, then give up immediately. It's not safe to produce a live-in
+    // value.
+    if (Bail || Values.size() == 0)
+      continue;
+
+    // Enumeration identifying the current state of the predecessors' values.
+    enum {
+      Unset = 0,
+      Agreed,       // All preds agree on the variable value.
+      PropDisagree, // All preds agree, but the value kind is Proposed in some.
+      BEDisagree,   // Only back-edges disagree on the variable value.
+      PHINeeded,    // Non-back-edge predecessors have conflicting values.
+      NoSolution    // Conflicting Value metadata makes a solution impossible.
+    } OurState = Unset;
+
+    // All (non-entry) blocks have at least one non-backedge predecessor.
+    // Pick the variable value from the first of these, to compare against
+    // all others.
+    const DbgValue &FirstVal = *Values[0].second;
+    const ValueIDNum &FirstID = FirstVal.ID;
+
+    // Scan for variable values that can't be resolved: if they have different
+    // DIExpressions, different indirectness, or are mixed constants /
+    // non-constants.
+    for (auto &V : Values) {
+      if (V.second->Properties != FirstVal.Properties)
+        OurState = NoSolution;
+      if (V.second->Kind == DbgValue::Const &&
+          FirstVal.Kind != DbgValue::Const)
+        OurState = NoSolution;
+    }
+
+    // Flags diagnosing _how_ the values disagree.
+    bool NonBackEdgeDisagree = false;
+    bool DisagreeOnPHINess = false;
+    bool IDDisagree = false;
+    bool Disagree = false;
+    if (OurState == Unset) {
+      for (auto &V : Values) {
+        if (*V.second == FirstVal)
+          continue; // No disagreement.
+
+        Disagree = true;
+
+        // Flag whether the value number actually disagrees.
+        if (V.second->ID != FirstID)
+          IDDisagree = true;
+
+        // Distinguish whether the disagreement happens in backedges or not.
+        // Relies on Values (and BlockOrders) being sorted by RPO.
+        unsigned ThisBBRPONum = BBToOrder[V.first];
+        if (ThisBBRPONum < CurBlockRPONum)
+          NonBackEdgeDisagree = true;
+
+        // Is there a difference in whether the value is definite or only
+        // proposed?
+        if (V.second->Kind != FirstVal.Kind &&
+            (V.second->Kind == DbgValue::Proposed ||
+             V.second->Kind == DbgValue::Def) &&
+            (FirstVal.Kind == DbgValue::Proposed ||
+             FirstVal.Kind == DbgValue::Def))
+          DisagreeOnPHINess = true;
+      }
+
+      // Collect those flags together and determine an overall state for
+      // the extent to which the predecessors agree on a live-in value.
+      if (!Disagree)
+        OurState = Agreed;
+      else if (!IDDisagree && DisagreeOnPHINess)
+        OurState = PropDisagree;
+      else if (!NonBackEdgeDisagree)
+        OurState = BEDisagree;
+      else
+        OurState = PHINeeded;
+    }
+
+    // An extra indicator: if we only disagree on whether the value is a
+    // Def or Proposed, also flag whether that disagreement happens
+    // in backedges only.
+    bool PropOnlyInBEs = Disagree && !IDDisagree && DisagreeOnPHINess &&
+                         !NonBackEdgeDisagree && FirstVal.Kind == DbgValue::Def;
+
+    const auto &Properties = FirstVal.Properties;
+
+    auto OldLiveInIt = ILS.find(Var);
+    const DbgValue *OldLiveInLocation =
+        (OldLiveInIt != ILS.end()) ? &OldLiveInIt->second : nullptr;
+
+    bool OverRide = false;
+    if (OurState == BEDisagree && OldLiveInLocation) {
+      // Only backedges disagree: we can consider downgrading. If there was a
+      // previous live-in value, use it to work out whether the current
+      // incoming value represents a lattice downgrade or not.
+      OverRide =
+          vlocDowngradeLattice(MBB, *OldLiveInLocation, Values, CurBlockRPONum);
+    }
+
+    // Use the current state of predecessor agreement and other flags to work
+    // out what to do next. Possibilities include:
+    //  * Accept a value all predecessors agree on, or accept one that
+    //    represents a step down the exploration lattice,
+    //  * Use a PHI value number, if one can be found,
+    //  * Propose a PHI value number, and see if it gets confirmed later,
+    //  * Emit a 'NoVal' value, indicating we couldn't resolve anything.
+    if (OurState == Agreed) {
+      // Easiest solution: all predecessors agree on the variable value.
+      ConfirmValue(Var, FirstVal);
+    } else if (OurState == BEDisagree && OverRide) {
+      // Only backedges disagree, and the other predecessors have produced
+      // a new live-in value further down the exploration lattice.
+      DowngradeOccurred = true;
+      ConfirmValue(Var, FirstVal);
+    } else if (OurState == PropDisagree) {
+      // Predecessors agree on the value, but some say it's only proposed.
+      // Propagate it as proposed: unless it was proposed in this block, in
+      // which case we're able to confirm the value.
+      if (FirstID.BlockNo == MBB.getNumber() && FirstID.InstNo == 0) {
+        ConfirmValue(Var, {{FirstID}, Properties, DbgValue::Def});
+      } else if (PropOnlyInBEs) {
+        // If only backedges disagree, a higher (in RPO) block confirmed this
+        // location, and we need to propagate it into this loop.
+        ConfirmValue(Var, {{FirstID}, Properties, DbgValue::Def});
+      } else {
+        // Otherwise, a Def meeting a Proposed is still a Proposed.
+        ConfirmValue(Var, {{FirstID}, Properties, DbgValue::Proposed});
+      }
+    } else if (OurState == PHINeeded || OurState == BEDisagree) {
+      // Predecessors disagree and can't be downgraded: this can only be
+      // solved with a PHI. Use pickVPHILoc to go look for one.
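+      // e.g. if every predecessor's live-out leaves this variable's value in
+      // $rdi, the PHI value {this block, inst 0, $rdi} is usable; if only the
+      // non-backedge predecessors do, it can merely be proposed.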
+      Optional<ValueIDNum> VPHI;
+      bool AllEdgesVPHI = false;
+      std::tie(VPHI, AllEdgesVPHI) =
+          pickVPHILoc(MBB, Var, VLOCOutLocs, MOutLocs, MInLocs, BlockOrders);
+
+      if (VPHI && AllEdgesVPHI) {
+        // There's a PHI value that's valid for all predecessors -- we can use
+        // it. If any of the non-backedge predecessors have proposed values
+        // though, this PHI is also only proposed, until the predecessors are
+        // confirmed.
+        DbgValue::KindT K = DbgValue::Def;
+        for (unsigned int I = 0; I < BackEdgesStart; ++I)
+          if (Values[I].second->Kind == DbgValue::Proposed)
+            K = DbgValue::Proposed;
+
+        ConfirmValue(Var, {{*VPHI}, Properties, K});
+      } else if (VPHI) {
+        // There's a PHI value, but it's only legal for backedges. Leave this
+        // as a proposed PHI value: it might come back on the backedges,
+        // and allow us to confirm it in the future.
+        DbgValue NoBEValue = {{*VPHI}, Properties, DbgValue::Proposed};
+        ConfirmValue(Var, NoBEValue);
+      } else {
+        ConfirmEmptyValue(Var, Properties);
+      }
+    } else {
+      // Otherwise: we don't know. Emit a "phi but no real loc" phi.
+      ConfirmEmptyValue(Var, Properties);
+    }
+  }
+
+  // Store newly calculated in-locs into VLOCInLocs, if they've changed.
+  Changed = ILS != InLocsT;
+  if (Changed)
+    ILS = InLocsT;
+
+  return std::tuple<bool, bool>(Changed, DowngradeOccurred);
+}
+
+void InstrRefBasedLDV::vlocDataflow(
+    const LexicalScope *Scope, const DILocation *DILoc,
+    const SmallSet<DebugVariable, 4> &VarsWeCareAbout,
+    SmallPtrSetImpl<MachineBasicBlock *> &AssignBlocks, LiveInsT &Output,
+    uint64_t **MOutLocs, uint64_t **MInLocs,
+    SmallVectorImpl<VLocTracker> &AllTheVLocs) {
+  // This method is much like mlocDataflow: but focuses on a single
+  // LexicalScope at a time. Pick out a set of blocks and variables that are
+  // to have their value assignments solved, then run our dataflow algorithm
+  // until a fixed point is reached.
+  std::priority_queue<unsigned int, std::vector<unsigned int>,
+                      std::greater<unsigned int>>
+      Worklist, Pending;
+  SmallPtrSet<MachineBasicBlock *, 16> OnWorklist, OnPending;
+
+  // The set of blocks we'll be examining.
+  SmallPtrSet<const MachineBasicBlock *, 8> BlocksToExplore;
+
+  // The order in which to examine them (RPO).
+  SmallVector<MachineBasicBlock *, 8> BlockOrders;
+
+  // RPO ordering function.
+  auto Cmp = [&](MachineBasicBlock *A, MachineBasicBlock *B) {
+    return BBToOrder[A] < BBToOrder[B];
+  };
+
+  LS.getMachineBasicBlocks(DILoc, BlocksToExplore);
+
+  // A separate container to distinguish "blocks we're exploring" versus
+  // "blocks that are potentially in scope". See comment at start of vlocJoin.
+  SmallPtrSet<const MachineBasicBlock *, 8> InScopeBlocks = BlocksToExplore;
+
+  // Old LiveDebugValues tracks variable locations that come out of blocks
+  // not in scope, where DBG_VALUEs occur. This is something we could
+  // legitimately ignore, but let's allow it for now.
+  if (EmulateOldLDV)
+    BlocksToExplore.insert(AssignBlocks.begin(), AssignBlocks.end());
+
+  // We also need to propagate variable values through any artificial blocks
+  // that immediately follow blocks in scope.
+  DenseSet<const MachineBasicBlock *> ToAdd;
+
+  // Helper lambda: For a given block in scope, perform a depth first search
+  // of all the artificial successors, adding them to the ToAdd collection.
+  auto AccumulateArtificialBlocks =
+      [this, &ToAdd, &BlocksToExplore,
+       &InScopeBlocks](const MachineBasicBlock *MBB) {
+        // Depth-first-search state: each node is a block and which successor
+        // we're currently exploring.
+        SmallVector<std::pair<const MachineBasicBlock *,
+                              MachineBasicBlock::const_succ_iterator>,
+                    8>
+            DFS;
+
+        // Find any artificial successors not already tracked.
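+        // (Artificial blocks are those whose instructions all carry line-zero
+        // debug locations, e.g. compiler-generated code; see initialSetup.)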
+        for (auto *succ : MBB->successors()) {
+          if (BlocksToExplore.count(succ) || InScopeBlocks.count(succ))
+            continue;
+          if (!ArtificialBlocks.count(succ))
+            continue;
+          DFS.push_back(std::make_pair(succ, succ->succ_begin()));
+          ToAdd.insert(succ);
+        }
+
+        // Search all those blocks, depth first.
+        while (!DFS.empty()) {
+          const MachineBasicBlock *CurBB = DFS.back().first;
+          MachineBasicBlock::const_succ_iterator &CurSucc = DFS.back().second;
+          // Walk back if we've explored this block's successors to the end.
+          if (CurSucc == CurBB->succ_end()) {
+            DFS.pop_back();
+            continue;
+          }
+
+          // If the current successor is artificial and unexplored, descend
+          // into it.
+          if (!ToAdd.count(*CurSucc) && ArtificialBlocks.count(*CurSucc)) {
+            DFS.push_back(std::make_pair(*CurSucc, (*CurSucc)->succ_begin()));
+            ToAdd.insert(*CurSucc);
+            continue;
+          }
+
+          ++CurSucc;
+        }
+      };
+
+  // Search in-scope blocks and those containing a DBG_VALUE from this scope
+  // for artificial successors.
+  for (auto *MBB : BlocksToExplore)
+    AccumulateArtificialBlocks(MBB);
+  for (auto *MBB : InScopeBlocks)
+    AccumulateArtificialBlocks(MBB);
+
+  BlocksToExplore.insert(ToAdd.begin(), ToAdd.end());
+  InScopeBlocks.insert(ToAdd.begin(), ToAdd.end());
+
+  // Single block scope: not interesting! No propagation at all. Note that
+  // this could probably go above ArtificialBlocks without damage, but
+  // that then produces output differences from original-live-debug-values,
+  // which propagates from a single block into many artificial ones.
+  if (BlocksToExplore.size() == 1)
+    return;
+
+  // Pick out the relevant blocks, in RPO order, and sort them.
+  for (auto *MBB : BlocksToExplore)
+    BlockOrders.push_back(const_cast<MachineBasicBlock *>(MBB));
+
+  llvm::sort(BlockOrders.begin(), BlockOrders.end(), Cmp);
+  unsigned NumBlocks = BlockOrders.size();
+
+  // Allocate some vectors for storing the live ins and live outs. Large.
+  SmallVector<DenseMap<DebugVariable, DbgValue>, 32> LiveIns, LiveOuts;
+  LiveIns.resize(NumBlocks);
+  LiveOuts.resize(NumBlocks);
+
+  // Produce by-MBB indexes of live-in/live-outs, to ease lookup within
+  // vlocJoin.
+  LiveIdxT LiveOutIdx, LiveInIdx;
+  LiveOutIdx.reserve(NumBlocks);
+  LiveInIdx.reserve(NumBlocks);
+  for (unsigned I = 0; I < NumBlocks; ++I) {
+    LiveOutIdx[BlockOrders[I]] = &LiveOuts[I];
+    LiveInIdx[BlockOrders[I]] = &LiveIns[I];
+  }
+
+  for (auto *MBB : BlockOrders) {
+    Worklist.push(BBToOrder[MBB]);
+    OnWorklist.insert(MBB);
+  }
+
+  // Iterate over all the blocks we selected, propagating variable values.
+  bool FirstTrip = true;
+  SmallPtrSet<MachineBasicBlock *, 16> VLOCVisited;
+  while (!Worklist.empty() || !Pending.empty()) {
+    while (!Worklist.empty()) {
+      auto *MBB = OrderToBB[Worklist.top()];
+      CurBB = MBB->getNumber();
+      Worklist.pop();
+
+      DenseMap<DebugVariable, DbgValue> JoinedInLocs;
+
+      // Join values from predecessors. Updates LiveInIdx, and writes output
+      // into JoinedInLocs.
+      bool InLocsChanged, DowngradeOccurred;
+      std::tie(InLocsChanged, DowngradeOccurred) = vlocJoin(
+          *MBB, LiveOutIdx, LiveInIdx, (FirstTrip) ? &VLOCVisited : nullptr,
+          CurBB, VarsWeCareAbout, MOutLocs, MInLocs, InScopeBlocks,
+          BlocksToExplore, JoinedInLocs);
+
+      // Always explore the transfer function if the inlocs changed, or if
+      // we've not visited this block before.
+      InLocsChanged |= VLOCVisited.insert(MBB).second;
+
+      // If a downgrade occurred, book us in for re-examination on the next
+      // iteration.
+      if (DowngradeOccurred && OnPending.insert(MBB).second)
+        Pending.push(BBToOrder[MBB]);
+
+      if (!InLocsChanged)
+        continue;
+
+      // Do the transfer function.
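+      // e.g. a DBG_VALUE $noreg in this block erases the variable's live-in
+      // value below, while any other DBG_VALUE overwrites it with a new one.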
+      auto &VTracker = AllTheVLocs[MBB->getNumber()];
+      for (auto &Transfer : VTracker.Vars) {
+        // Is this a var we're mangling in this scope?
+        if (VarsWeCareAbout.count(Transfer.first)) {
+          // Erase on empty transfer (DBG_VALUE $noreg).
+          if (Transfer.second.Kind == DbgValue::Def &&
+              Transfer.second.ID.LocNo == 0)
+            JoinedInLocs.erase(Transfer.first);
+          else {
+            // Insert new variable value; or overwrite.
+            auto NewValuePair = std::make_pair(Transfer.first, Transfer.second);
+            auto Result = JoinedInLocs.insert(NewValuePair);
+            if (!Result.second)
+              Result.first->second = Transfer.second;
+          }
+        }
+      }
+
+      // Did the live-out locations change?
+      bool OLChanged = JoinedInLocs != *LiveOutIdx[MBB];
+
+      // If they haven't changed, there's no need to explore further.
+      if (!OLChanged)
+        continue;
+
+      // Commit to the live-out record.
+      *LiveOutIdx[MBB] = JoinedInLocs;
+
+      // We should visit all successors. Ensure we'll visit any non-backedge
+      // successors during this dataflow iteration; book backedge successors
+      // to be visited next time around.
+      for (auto s : MBB->successors()) {
+        // Ignore out of scope / not-to-be-explored successors.
+        if (LiveInIdx.find(s) == LiveInIdx.end())
+          continue;
+
+        if (BBToOrder[s] > BBToOrder[MBB]) {
+          if (OnWorklist.insert(s).second)
+            Worklist.push(BBToOrder[s]);
+        } else if (OnPending.insert(s).second && (FirstTrip || OLChanged)) {
+          Pending.push(BBToOrder[s]);
+        }
+      }
+    }
+    Worklist.swap(Pending);
+    std::swap(OnWorklist, OnPending);
+    OnPending.clear();
+    assert(Pending.empty());
+    FirstTrip = false;
+  }
+
+  // Dataflow done. Now what? Save live-ins. Ignore any that are still marked
+  // as being variable-PHIs, because those did not have their machine-PHI
+  // value confirmed. Such variable values are places that could have been
+  // PHIs, but are not.
+  for (auto *MBB : BlockOrders) {
+    auto &VarMap = *LiveInIdx[MBB];
+    for (auto &P : VarMap) {
+      if (P.second.Kind == DbgValue::Proposed ||
+          P.second.Kind == DbgValue::NoVal)
+        continue;
+      Output[MBB->getNumber()].push_back(P);
+    }
+  }
+
+  BlockOrders.clear();
+  BlocksToExplore.clear();
+}
+
+void InstrRefBasedLDV::dump_mloc_transfer(
+    const MLocTransferMap &mloc_transfer) const {
+  for (auto &P : mloc_transfer) {
+    std::string LocName = MTracker->LocIdxToName(P.first);
+    std::string ValueName = MTracker->IDAsString(P.second);
+    dbgs() << "Loc " << LocName << " --> " << ValueName << "\n";
+  }
+}
+
+void InstrRefBasedLDV::emitLocations(
+    MachineFunction &MF, LiveInsT SavedLiveIns, uint64_t **MInLocs,
+    DenseMap<DebugVariable, unsigned> &AllVarsNumbering) {
+  TTracker = new TransferTracker(TII, MTracker, MF, *TRI, CalleeSavedRegs);
+  unsigned NumLocs = MTracker->getNumLocs();
+
+  // For each block, load in the machine value locations and variable value
+  // live-ins, then step through each instruction in the block. New DBG_VALUEs
+  // to be inserted will be created along the way.
+  for (MachineBasicBlock &MBB : MF) {
+    unsigned bbnum = MBB.getNumber();
+    MTracker->reset();
+    MTracker->loadFromArray(MInLocs[bbnum], bbnum);
+    TTracker->loadInlocs(MBB, MInLocs[bbnum], SavedLiveIns[MBB.getNumber()],
+                         NumLocs);
+
+    CurBB = bbnum;
+    CurInst = 1;
+    for (auto &MI : MBB) {
+      process(MI);
+      ++CurInst;
+    }
+  }
+
+  // We have to insert DBG_VALUEs in a consistent order, otherwise they appear
+  // in DWARF in different orders. Use the order in which they appear when
+  // walking through each block / each instruction, stored in AllVarsNumbering.
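+  // Note: the lookups below assume every variable the TransferTracker emits
+  // was numbered during the earlier walk over DBG_VALUEs, i.e. find() always
+  // succeeds.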
+  auto OrderDbgValues = [&](const MachineInstr *A,
+                            const MachineInstr *B) -> bool {
+    DebugVariable VarA(A->getDebugVariable(), A->getDebugExpression(),
+                       A->getDebugLoc()->getInlinedAt());
+    DebugVariable VarB(B->getDebugVariable(), B->getDebugExpression(),
+                       B->getDebugLoc()->getInlinedAt());
+    return AllVarsNumbering.find(VarA)->second <
+           AllVarsNumbering.find(VarB)->second;
+  };
+
+  // Go through all the transfers recorded in the TransferTracker -- this is
+  // both the live-ins to a block, and any movements of values that happen
+  // in the middle.
+  for (auto &P : TTracker->Transfers) {
+    // Sort them according to appearance order.
+    llvm::sort(P.Insts.begin(), P.Insts.end(), OrderDbgValues);
+    // Insert either before or after the designated point...
+    if (P.MBB) {
+      MachineBasicBlock &MBB = *P.MBB;
+      for (auto *MI : P.Insts) {
+        MBB.insert(P.Pos, MI);
+      }
+    } else {
+      MachineBasicBlock &MBB = *P.Pos->getParent();
+      for (auto *MI : P.Insts) {
+        MBB.insertAfter(P.Pos, MI);
+      }
+    }
+  }
+}
+
+void InstrRefBasedLDV::initialSetup(MachineFunction &MF) {
+  // Build some useful data structures.
+  auto hasNonArtificialLocation = [](const MachineInstr &MI) -> bool {
+    if (const DebugLoc &DL = MI.getDebugLoc())
+      return DL.getLine() != 0;
+    return false;
+  };
+  // Collect a set of all the artificial blocks.
+  for (auto &MBB : MF)
+    if (none_of(MBB.instrs(), hasNonArtificialLocation))
+      ArtificialBlocks.insert(&MBB);
+
+  // Compute mappings of block <=> RPO order.
+  ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
+  unsigned int RPONumber = 0;
+  for (auto RI = RPOT.begin(), RE = RPOT.end(); RI != RE; ++RI) {
+    OrderToBB[RPONumber] = *RI;
+    BBToOrder[*RI] = RPONumber;
+    BBNumToRPO[(*RI)->getNumber()] = RPONumber;
+    ++RPONumber;
+  }
+}
+
+/// Calculate the liveness information for the given machine function and
+/// extend ranges across basic blocks.
+bool InstrRefBasedLDV::ExtendRanges(MachineFunction &MF,
+                                    TargetPassConfig *TPC) {
+  LLVM_DEBUG(dbgs() << "\nDebug Range Extension\n");
+  this->TPC = TPC;
+
+  TRI = MF.getSubtarget().getRegisterInfo();
+  TII = MF.getSubtarget().getInstrInfo();
+  TFI = MF.getSubtarget().getFrameLowering();
+  TFI->getCalleeSaves(MF, CalleeSavedRegs);
+  LS.initialize(MF);
+
+  MTracker =
+      new MLocTracker(MF, *TII, *TRI, *MF.getSubtarget().getTargetLowering());
+  VTracker = nullptr;
+  TTracker = nullptr;
+
+  SmallVector<MLocTransferMap, 32> MLocTransfer;
+  SmallVector<VLocTracker, 8> vlocs;
+  LiveInsT SavedLiveIns;
+
+  int MaxNumBlocks = -1;
+  for (auto &MBB : MF)
+    MaxNumBlocks = std::max(MBB.getNumber(), MaxNumBlocks);
+  assert(MaxNumBlocks >= 0);
+  ++MaxNumBlocks;
+
+  MLocTransfer.resize(MaxNumBlocks);
+  vlocs.resize(MaxNumBlocks);
+  SavedLiveIns.resize(MaxNumBlocks);
+
+  initialSetup(MF);
+
+  produceMLocTransferFunction(MF, MLocTransfer, MaxNumBlocks);
+
+  // Allocate and initialize two array-of-arrays for the live-in and live-out
+  // machine values. The outer dimension is the block number; the inner
+  // dimension is a LocIdx from MLocTracker.
+  uint64_t **MOutLocs = new uint64_t *[MaxNumBlocks];
+  uint64_t **MInLocs = new uint64_t *[MaxNumBlocks];
+  unsigned NumLocs = MTracker->getNumLocs();
+  for (int i = 0; i < MaxNumBlocks; ++i) {
+    MOutLocs[i] = new uint64_t[NumLocs];
+    memset(MOutLocs[i], 0, sizeof(uint64_t) * NumLocs);
+    MInLocs[i] = new uint64_t[NumLocs];
+    memset(MInLocs[i], 0, sizeof(uint64_t) * NumLocs);
+  }
+
+  // Solve the machine value dataflow problem using the MLocTransfer function,
+  // storing the computed live-ins / live-outs into the array-of-arrays. We
+  // use both live-ins and live-outs for decision making in the variable
+  // value dataflow problem.
+  mlocDataflow(MInLocs, MOutLocs, MLocTransfer);
+
+  // Walk back through each block / instruction, collecting DBG_VALUE
+  // instructions and recording what machine value their operands refer to.
+  for (unsigned int I = 0; I < OrderToBB.size(); ++I) {
+    MachineBasicBlock &MBB = *OrderToBB[I];
+    CurBB = MBB.getNumber();
+    VTracker = &vlocs[CurBB];
+    VTracker->MBB = &MBB;
+    MTracker->loadFromArray(MInLocs[CurBB], CurBB);
+    CurInst = 1;
+    for (auto &MI : MBB) {
+      process(MI);
+      ++CurInst;
+    }
+    MTracker->reset();
+  }
+
+  // Number all variables in the order that they appear, to be used as a
+  // stable insertion order later.
+  DenseMap<DebugVariable, unsigned> AllVarsNumbering;
+
+  // Map from one LexicalScope to all the variables in that scope.
+  DenseMap<const LexicalScope *, SmallSet<DebugVariable, 4>> ScopeToVars;
+
+  // Map from one lexical scope to all blocks in that scope.
+  DenseMap<const LexicalScope *, SmallPtrSet<MachineBasicBlock *, 4>>
+      ScopeToBlocks;
+
+  // Store a DILocation that describes a scope.
+  DenseMap<const LexicalScope *, const DILocation *> ScopeToDILocation;
+
+  // To mirror old LiveDebugValues, enumerate variables in RPOT order.
+  // Otherwise the order is unimportant; it just has to be stable.
+  for (unsigned int I = 0; I < OrderToBB.size(); ++I) {
+    auto *MBB = OrderToBB[I];
+    auto *VTracker = &vlocs[MBB->getNumber()];
+    // Collect each variable with a DBG_VALUE in this block.
+    for (auto &idx : VTracker->Vars) {
+      const auto &Var = idx.first;
+      const DILocation *ScopeLoc = VTracker->Scopes[Var];
+      assert(ScopeLoc != nullptr);
+      auto *Scope = LS.findLexicalScope(ScopeLoc);
+
+      // No insts in scope -> shouldn't have been recorded.
+      assert(Scope != nullptr);
+
+      AllVarsNumbering.insert(std::make_pair(Var, AllVarsNumbering.size()));
+      ScopeToVars[Scope].insert(Var);
+      ScopeToBlocks[Scope].insert(VTracker->MBB);
+      ScopeToDILocation[Scope] = ScopeLoc;
+    }
+  }
+
+  // OK. Iterate over scopes: there might be something to be said for
+  // ordering them by size/locality, but that's for the future. For each
+  // scope, solve the variable value problem, producing a map of variables
+  // to values in SavedLiveIns.
+  for (auto &P : ScopeToVars) {
+    vlocDataflow(P.first, ScopeToDILocation[P.first], P.second,
+                 ScopeToBlocks[P.first], SavedLiveIns, MOutLocs, MInLocs,
+                 vlocs);
+  }
+
+  // Using the computed value locations and variable values for each block,
+  // create the DBG_VALUE instructions representing the extended variable
+  // locations.
+  emitLocations(MF, SavedLiveIns, MInLocs, AllVarsNumbering);
+
+  for (int Idx = 0; Idx < MaxNumBlocks; ++Idx) {
+    delete[] MOutLocs[Idx];
+    delete[] MInLocs[Idx];
+  }
+  delete[] MOutLocs;
+  delete[] MInLocs;
+
+  // Did we actually make any changes? If we created any DBG_VALUEs, then yes.
+  bool Changed = TTracker->Transfers.size() != 0;
+
+  delete MTracker;
+  delete TTracker;
+  VTracker = nullptr;
+  TTracker = nullptr;
+
+  ArtificialBlocks.clear();
+  OrderToBB.clear();
+  BBToOrder.clear();
+  BBNumToRPO.clear();
+
+  return Changed;
+}
+
+LDVImpl *llvm::makeInstrRefBasedLiveDebugValues() {
+  return new InstrRefBasedLDV();
+}
Index: llvm/lib/CodeGen/LiveDebugValues/LiveDebugValues.h
===================================================================
--- llvm/lib/CodeGen/LiveDebugValues/LiveDebugValues.h
+++ llvm/lib/CodeGen/LiveDebugValues/LiveDebugValues.h
@@ -28,4 +28,5 @@
 // Factory functions for LiveDebugValues implementations.
 extern LDVImpl *makeVarLocBasedLiveDebugValues();
+extern LDVImpl *makeInstrRefBasedLiveDebugValues();
 } // namespace llvm