Index: llvm/lib/CodeGen/InlineSpiller.cpp
===================================================================
--- llvm/lib/CodeGen/InlineSpiller.cpp
+++ llvm/lib/CodeGen/InlineSpiller.cpp
@@ -827,8 +827,17 @@
   if (Ops.back().first != MI || MI->isBundled())
     return false;
 
-  bool WasCopy = MI->isCopy();
   Register ImpReg;
+  bool WasCopy = MI->isCopy();
+  Register CopyReg = MCRegister::NoRegister;
+  if (WasCopy && MI->getNumOperands() == 2) {
+    assert(MI->getOperand(0).isDef() && "operand 0 should be a def");
+    assert(MI->getOperand(1).isUse() && "operand 1 should be a use");
+
+    MachineOperand &MO = MI->getOperand(1);
+    if (MO.isReg() && !MO.isImplicit())
+      CopyReg = MO.getReg();
+  }
 
   // TII::foldMemoryOperand will do what we need here for statepoint
   // (fold load into use and remove corresponding def). We will replace
@@ -947,9 +956,17 @@
         FoldMI->RemoveOperand(i - 1);
     }
 
-  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
-                                                "folded"));
-
+  auto TryKillReg = [&]() {
+    assert(WasCopy && "old machine instruction must be a copy instruction");
+    SmallVector<std::pair<MachineInstr *, unsigned>, 8> SpillMIOps;
+    VirtRegInfo RI = AnalyzeVirtRegInBundle(*FoldMI, CopyReg, &SpillMIOps);
+    for (const auto &OpPair : SpillMIOps) {
+      MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
+      if (MO.isUse())
+        if (!OpPair.first->isRegTiedToDefOperand(OpPair.second))
+          MO.setIsKill();
+    }
+  };
   if (!WasCopy)
     ++NumFolded;
   else if (Ops.front().second == 0) {
@@ -957,10 +974,16 @@
     // If there is only 1 store instruction is required for spill, add it
     // to mergeable list. In X86 AMX, 2 intructions are required to store.
     // We disable the merge for this case.
-    if (std::distance(MIS.begin(), MIS.end()) <= 1)
+    if (std::distance(MIS.begin(), MIS.end()) <= 1) {
+      if (CopyReg.isValid())
+        TryKillReg();
       HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
+    }
   } else
     ++NumReloads;
+
+  LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
+                                                "folded"));
   return true;
 }
Index: llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp
===================================================================
--- llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp
+++ llvm/lib/CodeGen/LiveDebugValues/VarLocBasedImpl.cpp
@@ -1310,14 +1310,16 @@
         return (MO.isReg() && MO.getReg()) || MO.isImm() || MO.isFPImm() ||
                MO.isCImm();
       })) {
-    // Use normal VarLoc constructor for registers and immediates.
-    VarLoc VL(MI, LS);
-    // End all previous ranges of VL.Var.
-    OpenRanges.erase(VL);
+    if (!MI.isIndirectDebugValue()) {
+      // Use normal VarLoc constructor for registers and immediates.
+      VarLoc VL(MI, LS);
+      // End all previous ranges of VL.Var.
+      OpenRanges.erase(VL);
 
-    LocIndices IDs = VarLocIDs.insert(VL);
-    // Add the VarLoc to OpenRanges from this DBG_VALUE.
-    OpenRanges.insert(IDs, VL);
+      LocIndices IDs = VarLocIDs.insert(VL);
+      // Add the VarLoc to OpenRanges from this DBG_VALUE.
+      OpenRanges.insert(IDs, VL);
+    }
   } else if (MI.memoperands().size() > 0) {
     llvm_unreachable("DBG_VALUE with mem operand encountered after regalloc?");
   } else {
@@ -2175,8 +2177,18 @@
       // operate with registers that correspond to user variables.
      // First load any pending inlocs.
      OpenRanges.insertFromLocSet(getVarLocsInMBB(MBB, InLocs), VarLocIDs);
-      for (auto &MI : *MBB)
+      for (auto &MI : *MBB) {
+#if !defined(NDEBUG)
+        MI.dump();
+        for (uint64_t ID : OpenRanges.getSpillVarLocs()) {
+          LocIndex Idx = LocIndex::fromRawInteger(ID);
+          const VarLoc &VL = VarLocIDs[Idx];
+          assert(VL.containsSpillLocs() && "Broken VarLocSet?");
+          VL.dump(TRI);
+        }
+#endif
         process(MI, OpenRanges, VarLocIDs, Transfers);
+      }
       OLChanged |= transferTerminator(MBB, OpenRanges, OutLocs, VarLocIDs);
 
       LLVM_DEBUG(printVarLocInMBB(MF, OutLocs, VarLocIDs,