Index: llvm/lib/CodeGen/InlineSpiller.cpp
===================================================================
--- llvm/lib/CodeGen/InlineSpiller.cpp
+++ llvm/lib/CodeGen/InlineSpiller.cpp
@@ -818,6 +818,15 @@
   // foldMemoryOperand and signal foldPatchpoint that it is allowed to
   // fold them.
   bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;
+  // Track whether any operand was actually untied so the early-exit paths
+  // below can assert that we never bail out of folding after mutating MI.
+  bool UntieRegsHappened = false;
+  // NOTE: capture by reference is required here -- a by-value capture would
+  // snapshot the initial 'false' and the assert below could never fire.
+  auto UntieRegsCheck = [&UntieRegsHappened]() {
+    assert(!UntieRegsHappened && "Untie Regs broken");
+    return false;
+  };
 
   // Spill subregs if the target allows it.
   // We always want to spill subregs for stackmap/patchpoint pseudos.
@@ -838,14 +847,16 @@
       continue;
     }
 
-    if (UntieRegs && MO.isTied())
+    if (UntieRegs && MO.isTied()) {
+      UntieRegsHappened = true;
       MI->untieRegOperand(Idx);
+    }
 
     if (!SpillSubRegs && MO.getSubReg())
-      return false;
+      return UntieRegsCheck();
     // We cannot fold a load instruction into a def.
     if (LoadMI && MO.isDef())
-      return false;
+      return UntieRegsCheck();
     // Tied use operands should not be passed to foldMemoryOperand.
     if (!MI->isRegTiedToDefOperand(Idx))
       FoldOps.push_back(Idx);
@@ -854,7 +865,7 @@
   // If we only have implicit uses, we won't be able to fold that.
   // Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
   if (FoldOps.empty())
-    return false;
+    return UntieRegsCheck();
 
   MachineInstrSpan MIS(MI, MI->getParent());
 
@@ -862,7 +873,7 @@
   MachineInstr *FoldMI = LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
                                 : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
   if (!FoldMI)
-    return false;
+    return UntieRegsCheck();
 
   // Remove LIS for any dead defs in the original MI not in FoldMI.
   for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {