Index: include/llvm/CodeGen/MachineOperand.h =================================================================== --- include/llvm/CodeGen/MachineOperand.h +++ include/llvm/CodeGen/MachineOperand.h @@ -116,9 +116,9 @@ /// the same register. In that case, the instruction may depend on those /// operands reading the same dont-care value. For example: /// - /// %vreg1 = XOR %vreg2, %vreg2 + /// %1 = XOR %2, %2 /// - /// Any register can be used for %vreg2, and its value doesn't matter, but + /// Any register can be used for %2, and its value doesn't matter, but /// the two operands must be the same register. /// bool IsUndef : 1; Index: include/llvm/CodeGen/TargetRegisterInfo.h =================================================================== --- include/llvm/CodeGen/TargetRegisterInfo.h +++ include/llvm/CodeGen/TargetRegisterInfo.h @@ -1138,8 +1138,8 @@ /// /// The format is: /// _ - NoRegister -/// %vreg5 - a virtual register. -/// %vreg5:sub_8bit - a virtual register with sub-register index (with TRI). +/// %5 - a virtual register. +/// %5:sub_8bit - a virtual register with sub-register index (with TRI). /// %eax - a physical register /// %physreg17 - a physical register when no TRI instance given. /// Index: lib/CodeGen/DetectDeadLanes.cpp =================================================================== --- lib/CodeGen/DetectDeadLanes.cpp +++ lib/CodeGen/DetectDeadLanes.cpp @@ -17,12 +17,12 @@ /// when subregisters are involved. /// /// Example: -/// %vreg0 = some definition -/// %vreg1 = IMPLICIT_DEF -/// %vreg2 = REG_SEQUENCE %vreg0, sub0, %vreg1, sub1 -/// %vreg3 = EXTRACT_SUBREG %vreg2, sub1 -/// = use %vreg3 -/// The %vreg0 definition is dead and %vreg3 contains an undefined value. +/// %0 = some definition +/// %1 = IMPLICIT_DEF +/// %2 = REG_SEQUENCE %0, sub0, %1, sub1 +/// %3 = EXTRACT_SUBREG %2, sub1 +/// = use %3 +/// The %0 definition is dead and %3 contains an undefined value. // //===----------------------------------------------------------------------===// Index: lib/CodeGen/LiveIntervalAnalysis.cpp =================================================================== --- lib/CodeGen/LiveIntervalAnalysis.cpp +++ lib/CodeGen/LiveIntervalAnalysis.cpp @@ -698,11 +698,11 @@ // Check if any of the regunits are live beyond the end of RI. That could // happen when a physreg is defined as a copy of a virtreg: // - // %EAX = COPY %vreg5 - // FOO %vreg5 <--- MI, cancel kill because %EAX is live. + // %EAX = COPY %5 + // FOO %5 <--- MI, cancel kill because %EAX is live. // BAR %EAX // - // There should be no kill flag on FOO when %vreg5 is rewritten as %EAX. + // There should be no kill flag on FOO when %5 is rewritten as %EAX. for (auto &RUP : RU) { const LiveRange &RURange = *RUP.first; LiveRange::const_iterator &I = RUP.second; @@ -719,13 +719,13 @@ // When reading a partial undefined value we must not add a kill flag. // The regalloc might have used the undef lane for something else. // Example: - // %vreg1 = ... ; R32: %vreg1 - // %vreg2:high16 = ... ; R64: %vreg2 - // = read %vreg2 ; R64: %vreg2 - // = read %vreg1 ; R32: %vreg1 - // The flag is correct for %vreg2, but the register allocator may - // assign R0L to %vreg1, and R0 to %vreg2 because the low 32bits of R0 - // are actually never written by %vreg2. After assignment the + // %1 = ... ; R32: %1 + // %2:high16 = ... 
; R64: %2 + // = read %2 ; R64: %2 + // = read %1 ; R32: %1 + // The flag is correct for %2, but the register allocator may + // assign R0L to %1, and R0 to %2 because the low 32bits of R0 + // are actually never written by %2. After assignment the // flag at the read instruction is invalid. LaneBitmask DefinedLanesMask; if (!SRs.empty()) { Index: lib/CodeGen/PeepholeOptimizer.cpp =================================================================== --- lib/CodeGen/PeepholeOptimizer.cpp +++ lib/CodeGen/PeepholeOptimizer.cpp @@ -1453,10 +1453,10 @@ // only the first copy is considered. // // e.g. -// %vreg1 = COPY %vreg0 -// %vreg2 = COPY %vreg0:sub1 +// %1 = COPY %0 +// %2 = COPY %0:sub1 // -// Should replace %vreg2 uses with %vreg1:sub1 +// Should replace %2 uses with %1:sub1 bool PeepholeOptimizer::foldRedundantCopy( MachineInstr *MI, SmallSet &CopySrcRegs, DenseMap &CopyMIs) { @@ -1621,16 +1621,16 @@ /// from the phi. For example, if there is a recurrence of /// /// LoopHeader: -/// %vreg1 = phi(%vreg0, %vreg100) +/// %1 = phi(%0, %100) /// LoopLatch: -/// %vreg0 = ADD %vreg2, %vreg1 +/// %0 = ADD %2, %1 /// /// , the fact that vreg0 and vreg2 are in the same tied operands set makes /// the coalescing of copy instruction generated from the phi in -/// LoopHeader(i.e. %vreg1 = COPY %vreg0) impossible, because %vreg1 and -/// %vreg2 have overlapping live range. This introduces additional move -/// instruction to the final assembly. However, if we commute %vreg2 and -/// %vreg1 of ADD instruction, the redundant move instruction can be +/// LoopHeader(i.e. %1 = COPY %0) impossible, because %1 and +/// %2 have overlapping live range. This introduces additional move +/// instruction to the final assembly. However, if we commute %2 and +/// %1 of ADD instruction, the redundant move instruction can be /// avoided. bool PeepholeOptimizer::optimizeRecurrence(MachineInstr &PHI) { SmallSet TargetRegs; @@ -1696,7 +1696,7 @@ // Track when a non-allocatable physical register is copied to a virtual // register so that useless moves can be removed. // - // %PHYSREG is the map index; MI is the last valid `%vreg = COPY %PHYSREG` + // %PHYSREG is the map index; MI is the last valid `% = COPY %PHYSREG` // without any intervening re-definition of %PHYSREG. DenseMap NAPhysToVirtMIs; Index: lib/CodeGen/RegisterCoalescer.cpp =================================================================== --- lib/CodeGen/RegisterCoalescer.cpp +++ lib/CodeGen/RegisterCoalescer.cpp @@ -228,9 +228,9 @@ /// flag. /// This can happen when undef uses were previously concealed by a copy /// which we coalesced. Example: - /// %vreg0:sub0 = ... - /// %vreg1 = COPY %vreg0 <-- Coalescing COPY reveals undef - /// = use %vreg1:sub1 <-- hidden undef use + /// %0:sub0 = ... 
+ /// %1 = COPY %0 <-- Coalescing COPY reveals undef + /// = use %1:sub1 <-- hidden undef use void addUndefFlag(const LiveInterval &Int, SlotIndex UseIdx, MachineOperand &MO, unsigned SubRegIdx); @@ -1143,10 +1143,10 @@ NewMI.setDebugLoc(DL); // In a situation like the following: - // %vreg0:subreg = instr ; DefMI, subreg = DstIdx - // %vreg1 = copy %vreg0:subreg ; CopyMI, SrcIdx = 0 - // instead of widening %vreg1 to the register class of %vreg0 simply do: - // %vreg1 = instr + // %0:subreg = instr ; DefMI, subreg = DstIdx + // %1 = copy %0:subreg ; CopyMI, SrcIdx = 0 + // instead of widening %1 to the register class of %0 simply do: + // %1 = instr const TargetRegisterClass *NewRC = CP.getNewRC(); if (DstIdx != 0) { MachineOperand &DefMO = NewMI.getOperand(0); @@ -1353,9 +1353,9 @@ // ProcessImpicitDefs may leave some copies of values, it only removes // local variables. When we have a copy like: // - // %vreg1 = COPY %vreg2 + // %1 = COPY %2 // - // We delete the copy and remove the corresponding value number from %vreg1. + // We delete the copy and remove the corresponding value number from %1. // Any uses of that value number are marked as . // Note that we do not query CoalescerPair here but redo isMoveInstr as the @@ -1820,18 +1820,18 @@ MachineInstr *CopyMI; if (CP.isFlipped()) { // Physreg is copied into vreg - // %vregY = COPY %X + // %Y = COPY %X // ... //< no other def of %X here - // use %vregY + // use %Y // => // ... // use %X CopyMI = MRI->getVRegDef(SrcReg); } else { // VReg is copied into physreg: - // %vregX = def + // %X = def // ... //< no other def or use of %Y here - // %Y = COPY %vregX + // %Y = COPY %X // => // %Y = def // ... Index: lib/CodeGen/RenameIndependentSubregs.cpp =================================================================== --- lib/CodeGen/RenameIndependentSubregs.cpp +++ lib/CodeGen/RenameIndependentSubregs.cpp @@ -10,20 +10,20 @@ /// Rename independent subregisters looks for virtual registers with /// independently used subregisters and renames them to new virtual registers. /// Example: In the following: -/// %vreg0:sub0 = ... -/// %vreg0:sub1 = ... -/// use %vreg0:sub0 -/// %vreg0:sub0 = ... -/// use %vreg0:sub0 -/// use %vreg0:sub1 +/// %0:sub0 = ... +/// %0:sub1 = ... +/// use %0:sub0 +/// %0:sub0 = ... +/// use %0:sub0 +/// use %0:sub1 /// sub0 and sub1 are never used together, and we have two independent sub0 /// definitions. This pass will rename to: -/// %vreg0:sub0 = ... -/// %vreg1:sub1 = ... -/// use %vreg1:sub1 -/// %vreg2:sub1 = ... -/// use %vreg2:sub1 -/// use %vreg0:sub0 +/// %0:sub0 = ... +/// %1:sub1 = ... +/// use %1:sub1 +/// %2:sub1 = ... +/// use %2:sub1 +/// use %0:sub0 // //===----------------------------------------------------------------------===// Index: lib/CodeGen/SplitKit.cpp =================================================================== --- lib/CodeGen/SplitKit.cpp +++ lib/CodeGen/SplitKit.cpp @@ -1375,9 +1375,9 @@ continue; // The problem here can be that the new register may have been created // for a partially defined original register. For example: - // %vreg827:subreg_hireg = ... + // %827:subreg_hireg = ... // ... 
- // %vreg828 = COPY %vreg827 + // %828 = COPY %827 if (S.empty()) continue; SubLRC.reset(&VRM.getMachineFunction(), LIS.getSlotIndexes(), &MDT, Index: lib/CodeGen/TargetRegisterInfo.cpp =================================================================== --- lib/CodeGen/TargetRegisterInfo.cpp +++ lib/CodeGen/TargetRegisterInfo.cpp @@ -92,7 +92,7 @@ else if (TargetRegisterInfo::isStackSlot(Reg)) OS << "SS#" << TargetRegisterInfo::stackSlot2Index(Reg); else if (TargetRegisterInfo::isVirtualRegister(Reg)) - OS << "%vreg" << TargetRegisterInfo::virtReg2Index(Reg); + OS << '%' << TargetRegisterInfo::virtReg2Index(Reg); else if (TRI && Reg < TRI->getNumRegs()) OS << '%' << StringRef(TRI->getName(Reg)).lower(); else @@ -132,7 +132,7 @@ Printable printVRegOrUnit(unsigned Unit, const TargetRegisterInfo *TRI) { return Printable([Unit, TRI](raw_ostream &OS) { if (TRI && TRI->isVirtualRegister(Unit)) { - OS << "%vreg" << TargetRegisterInfo::virtReg2Index(Unit); + OS << '%' << TargetRegisterInfo::virtReg2Index(Unit); } else { OS << printRegUnit(Unit, TRI); } Index: lib/Target/AArch64/AArch64InstrInfo.cpp =================================================================== --- lib/Target/AArch64/AArch64InstrInfo.cpp +++ lib/Target/AArch64/AArch64InstrInfo.cpp @@ -2801,14 +2801,14 @@ LiveIntervals *LIS) const { // This is a bit of a hack. Consider this instruction: // - // %vreg0 = COPY %SP; GPR64all:%vreg0 + // %0 = COPY %SP; GPR64all:%0 // // We explicitly chose GPR64all for the virtual register so such a copy might // be eliminated by RegisterCoalescer. However, that may not be possible, and - // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all + // %0 may even spill. We can't spill %SP, and since it is in the GPR64all // register class, TargetInstrInfo::foldMemoryOperand() is going to try. // - // To prevent that, we are going to constrain the %vreg0 register class here. + // To prevent that, we are going to constrain the %0 register class here. // // // @@ -2830,7 +2830,7 @@ // Handle the case where a copy is being spilled or filled but the source // and destination register class don't match. For example: // - // %vreg0 = COPY %XZR; GPR64common:%vreg0 + // %0 = COPY %XZR; GPR64common:%0 // // In this case we can still safely fold away the COPY and generate the // following spill code: @@ -2840,16 +2840,16 @@ // This also eliminates spilled cross register class COPYs (e.g. between x and // d regs) of the same size. For example: // - // %vreg0 = COPY %vreg1; GPR64:%vreg0, FPR64:%vreg1 + // %0 = COPY %1; GPR64:%0, FPR64:%1 // // will be filled as // - // LDRDui %vreg0, fi<#0> + // LDRDui %0, fi<#0> // // instead of // - // LDRXui %vregTemp, fi<#0> - // %vreg0 = FMOV %vregTemp + // LDRXui %Temp, fi<#0> + // %0 = FMOV %Temp // if (MI.isCopy() && Ops.size() == 1 && // Make sure we're only folding the explicit COPY defs/uses. 
@@ -2886,7 +2886,7 @@ // Handle cases like spilling def of: // - // %vreg0:sub_32 = COPY %WZR; GPR64common:%vreg0 + // %0:sub_32 = COPY %WZR; GPR64common:%0 // // where the physical register source can be widened and stored to the full // virtual reg destination stack slot, in this case producing: @@ -2934,12 +2934,12 @@ // Handle cases like filling use of: // - // %vreg0:sub_32 = COPY %vreg1; GPR64:%vreg0, GPR32:%vreg1 + // %0:sub_32 = COPY %1; GPR64:%0, GPR32:%1 // // where we can load the full virtual reg source stack slot, into the subreg // destination, in this case producing: // - // LDRWui %vreg0:sub_32, + // LDRWui %0:sub_32, // if (IsFill && SrcMO.getSubReg() == 0 && DstMO.isUndef()) { const TargetRegisterClass *FillRC; Index: lib/Target/AMDGPU/SIFixSGPRCopies.cpp =================================================================== --- lib/Target/AMDGPU/SIFixSGPRCopies.cpp +++ lib/Target/AMDGPU/SIFixSGPRCopies.cpp @@ -14,46 +14,46 @@ /// Register Class is the union of and /// /// BB0: -/// %vreg0 = SCALAR_INST -/// %vreg1 = COPY %vreg0 +/// %0 = SCALAR_INST +/// %1 = COPY %0 /// ... /// BRANCH %cond BB1, BB2 /// BB1: -/// %vreg2 = VECTOR_INST -/// %vreg3 = COPY %vreg2 +/// %2 = VECTOR_INST +/// %3 = COPY %2 /// BB2: -/// %vreg4 = PHI %vreg1 , , %vreg3 , -/// %vreg5 = VECTOR_INST %vreg4 +/// %4 = PHI %1 , , %3 , +/// %5 = VECTOR_INST %4 /// /// /// The coalescer will begin at BB0 and eliminate its copy, then the resulting /// code will look like this: /// /// BB0: -/// %vreg0 = SCALAR_INST +/// %0 = SCALAR_INST /// ... /// BRANCH %cond BB1, BB2 /// BB1: -/// %vreg2 = VECTOR_INST -/// %vreg3 = COPY %vreg2 +/// %2 = VECTOR_INST +/// %3 = COPY %2 /// BB2: -/// %vreg4 = PHI %vreg0 , , %vreg3 , -/// %vreg5 = VECTOR_INST %vreg4 +/// %4 = PHI %0 , , %3 , +/// %5 = VECTOR_INST %4 /// /// Now that the result of the PHI instruction is an SGPR, the register -/// allocator is now forced to constrain the register class of %vreg3 to +/// allocator is now forced to constrain the register class of %3 to /// so we end up with final code like this: /// /// BB0: -/// %vreg0 = SCALAR_INST +/// %0 = SCALAR_INST /// ... /// BRANCH %cond BB1, BB2 /// BB1: -/// %vreg2 = VECTOR_INST -/// %vreg3 = COPY %vreg2 +/// %2 = VECTOR_INST +/// %3 = COPY %2 /// BB2: -/// %vreg4 = PHI %vreg0 , , %vreg3 , -/// %vreg5 = VECTOR_INST %vreg4 +/// %4 = PHI %0 , , %3 , +/// %5 = VECTOR_INST %4 /// /// Now this code contains an illegal copy from a VGPR to an SGPR. /// Index: lib/Target/AMDGPU/SIFoldOperands.cpp =================================================================== --- lib/Target/AMDGPU/SIFoldOperands.cpp +++ lib/Target/AMDGPU/SIFoldOperands.cpp @@ -290,11 +290,11 @@ // copy since a subregister use tied to a full register def doesn't really // make sense. e.g. don't fold: // - // %vreg1 = COPY %vreg0:sub1 - // %vreg2 = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg1 + // %1 = COPY %0:sub1 + // %2 = V_MAC_{F16, F32} %3, %4, %1 // // into - // %vreg2 = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg0:sub1 + // %2 = V_MAC_{F16, F32} %3, %4, %0:sub1 if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister) return; } @@ -971,7 +971,7 @@ // Prevent folding operands backwards in the function. For example, // the COPY opcode must not be replaced by 1 in this example: // - // %vreg3 = COPY %VGPR0; VGPR_32:%vreg3 + // %3 = COPY %VGPR0; VGPR_32:%3 // ... 
// %VGPR0 = V_MOV_B32_e32 1, %EXEC MachineOperand &Dst = MI.getOperand(0); Index: lib/Target/AMDGPU/SIPeepholeSDWA.cpp =================================================================== --- lib/Target/AMDGPU/SIPeepholeSDWA.cpp +++ lib/Target/AMDGPU/SIPeepholeSDWA.cpp @@ -10,12 +10,12 @@ /// \file This pass tries to apply several peephole SDWA patterns. /// /// E.g. original: -/// V_LSHRREV_B32_e32 %vreg0, 16, %vreg1 -/// V_ADD_I32_e32 %vreg2, %vreg0, %vreg3 -/// V_LSHLREV_B32_e32 %vreg4, 16, %vreg2 +/// V_LSHRREV_B32_e32 %0, 16, %1 +/// V_ADD_I32_e32 %2, %0, %3 +/// V_LSHLREV_B32_e32 %4, 16, %2 /// /// Replace: -/// V_ADD_I32_sdwa %vreg4, %vreg1, %vreg3 +/// V_ADD_I32_sdwa %4, %1, %3 /// dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD /// //===----------------------------------------------------------------------===// @@ -410,7 +410,7 @@ } // If this is not immediate then it can be copy of immediate value, e.g.: - // %vreg1 = S_MOV_B32 255; + // %1 = S_MOV_B32 255; if (Op.isReg()) { for (const MachineOperand &Def : MRI->def_operands(Op.getReg())) { if (!isSameReg(Op, Def)) Index: lib/Target/ARM/ARMBaseInstrInfo.cpp =================================================================== --- lib/Target/ARM/ARMBaseInstrInfo.cpp +++ lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -1650,7 +1650,7 @@ } for (unsigned i = 3, e = MI0.getNumOperands(); i != e; ++i) { - // %vreg12 = PICLDR %vreg11, 0, pred:14, pred:_ + // %12 = PICLDR %11, 0, pred:14, pred:_ const MachineOperand &MO0 = MI0.getOperand(i); const MachineOperand &MO1 = MI1.getOperand(i); if (!MO0.isIdenticalTo(MO1)) Index: lib/Target/BPF/BPFISelDAGToDAG.cpp =================================================================== --- lib/Target/BPF/BPFISelDAGToDAG.cpp +++ lib/Target/BPF/BPFISelDAGToDAG.cpp @@ -546,7 +546,7 @@ if (!RegN || !TargetRegisterInfo::isVirtualRegister(RegN->getReg())) return; unsigned AndOpReg = RegN->getReg(); - DEBUG(dbgs() << "Examine %vreg" << TargetRegisterInfo::virtReg2Index(AndOpReg) + DEBUG(dbgs() << "Examine %" << TargetRegisterInfo::virtReg2Index(AndOpReg) << '\n'); // Examine the PHI insns in the MachineBasicBlock to found out the @@ -574,9 +574,9 @@ return; } else { // The PHI node looks like: - // %vreg2 = PHI %vreg0, , %vreg1, - // Trace each incoming definition, e.g., (%vreg0, BB#1) and (%vreg1, BB#3) - // The AND operation can be removed if both %vreg0 in BB#1 and %vreg1 in + // %2 = PHI %0, , %1, + // Trace each incoming definition, e.g., (%0, BB#1) and (%1, BB#3) + // The AND operation can be removed if both %0 in BB#1 and %1 in // BB#3 are defined with with a load matching the MaskN. DEBUG(dbgs() << "Check PHI Insn: "; MII->dump(); dbgs() << '\n'); unsigned PrevReg = -1; Index: lib/Target/Hexagon/HexagonBlockRanges.cpp =================================================================== --- lib/Target/Hexagon/HexagonBlockRanges.cpp +++ lib/Target/Hexagon/HexagonBlockRanges.cpp @@ -368,7 +368,7 @@ } } // Defs and clobbers can overlap, e.g. - // %D0 = COPY %vreg5, %R0, %R1 + // %D0 = COPY %5, %R0, %R1 for (RegisterRef R : Defs) Clobbers.erase(R); Index: lib/Target/Hexagon/HexagonConstPropagation.cpp =================================================================== --- lib/Target/Hexagon/HexagonConstPropagation.cpp +++ lib/Target/Hexagon/HexagonConstPropagation.cpp @@ -1974,7 +1974,7 @@ { const MachineOperand &VO = MI.getOperand(1); // The operand of CONST32 can be a blockaddress, e.g. - // %vreg0 = CONST32 + // %0 = CONST32 // Do this check for all instructions for safety. 
if (!VO.isImm()) return false; Index: lib/Target/Hexagon/HexagonEarlyIfConv.cpp =================================================================== --- lib/Target/Hexagon/HexagonEarlyIfConv.cpp +++ lib/Target/Hexagon/HexagonEarlyIfConv.cpp @@ -25,37 +25,37 @@ // // Example: // -// %vreg40 = L2_loadrub_io %vreg39, 1 -// %vreg41 = S2_tstbit_i %vreg40, 0 -// J2_jumpt %vreg41, , %PC +// %40 = L2_loadrub_io %39, 1 +// %41 = S2_tstbit_i %40, 0 +// J2_jumpt %41, , %PC // J2_jump , %PC // Successors according to CFG: BB#4(62) BB#5(62) // // BB#4: derived from LLVM BB %if.then // Predecessors according to CFG: BB#3 -// %vreg11 = A2_addp %vreg6, %vreg10 -// S2_storerd_io %vreg32, 16, %vreg11 +// %11 = A2_addp %6, %10 +// S2_storerd_io %32, 16, %11 // Successors according to CFG: BB#5 // // BB#5: derived from LLVM BB %if.end // Predecessors according to CFG: BB#3 BB#4 -// %vreg12 = PHI %vreg6, , %vreg11, -// %vreg13 = A2_addp %vreg7, %vreg12 -// %vreg42 = C2_cmpeqi %vreg9, 10 -// J2_jumpf %vreg42, , %PC +// %12 = PHI %6, , %11, +// %13 = A2_addp %7, %12 +// %42 = C2_cmpeqi %9, 10 +// J2_jumpf %42, , %PC // J2_jump , %PC // Successors according to CFG: BB#6(4) BB#3(124) // // would become: // -// %vreg40 = L2_loadrub_io %vreg39, 1 -// %vreg41 = S2_tstbit_i %vreg40, 0 -// spec-> %vreg11 = A2_addp %vreg6, %vreg10 -// pred-> S2_pstorerdf_io %vreg41, %vreg32, 16, %vreg11 -// %vreg46 = PS_pselect %vreg41, %vreg6, %vreg11 -// %vreg13 = A2_addp %vreg7, %vreg46 -// %vreg42 = C2_cmpeqi %vreg9, 10 -// J2_jumpf %vreg42, , %PC +// %40 = L2_loadrub_io %39, 1 +// %41 = S2_tstbit_i %40, 0 +// spec-> %11 = A2_addp %6, %10 +// pred-> S2_pstorerdf_io %41, %32, 16, %11 +// %46 = PS_pselect %41, %6, %11 +// %13 = A2_addp %7, %46 +// %42 = C2_cmpeqi %9, 10 +// J2_jumpf %42, , %PC // J2_jump , %PC // Successors according to CFG: BB#6 BB#3 Index: lib/Target/Hexagon/HexagonGenInsert.cpp =================================================================== --- lib/Target/Hexagon/HexagonGenInsert.cpp +++ lib/Target/Hexagon/HexagonGenInsert.cpp @@ -1107,9 +1107,9 @@ // Now, remove those whose sets of potentially removable registers are // contained in another IF candidate for VR. For example, given these // candidates for vreg45, - // %vreg45: - // (%vreg44,%vreg41,#9,#8), { %vreg42 } - // (%vreg43,%vreg41,#9,#8), { %vreg42 %vreg44 } + // %45: + // (%44,%41,#9,#8), { %42 } + // (%43,%41,#9,#8), { %42 %44 } // remove the first one, since it is contained in the second one. for (unsigned i = 0, n = LL.size(); i < n; ) { const RegisterSet &RMi = LL[i].second; Index: lib/Target/Hexagon/HexagonHardwareLoops.cpp =================================================================== --- lib/Target/Hexagon/HexagonHardwareLoops.cpp +++ lib/Target/Hexagon/HexagonHardwareLoops.cpp @@ -1720,7 +1720,7 @@ MachineOperand &MO = PredDef->getOperand(i); if (MO.isReg()) { // Skip all implicit references. In one case there was: - // %vreg140 = FCMPUGT32_rr %vreg138, %vreg139, %USR + // %140 = FCMPUGT32_rr %138, %139, %USR if (MO.isImplicit()) continue; if (MO.isUse()) { Index: lib/Target/Hexagon/HexagonPeephole.cpp =================================================================== --- lib/Target/Hexagon/HexagonPeephole.cpp +++ lib/Target/Hexagon/HexagonPeephole.cpp @@ -8,27 +8,27 @@ // This peephole pass optimizes in the following cases. // 1. Optimizes redundant sign extends for the following case // Transform the following pattern -// %vreg170 = SXTW %vreg166 +// %170 = SXTW %166 // ... 
-// %vreg176 = COPY %vreg170:isub_lo +// %176 = COPY %170:isub_lo // // Into -// %vreg176 = COPY vreg166 +// %176 = COPY vreg166 // // 2. Optimizes redundant negation of predicates. -// %vreg15 = CMPGTrr %vreg6, %vreg2 +// %15 = CMPGTrr %6, %2 // ... -// %vreg16 = NOT_p %vreg15 +// %16 = NOT_p %15 // ... -// JMP_c %vreg16, , %PC +// JMP_c %16, , %PC // // Into -// %vreg15 = CMPGTrr %vreg6, %vreg2; +// %15 = CMPGTrr %6, %2; // ... -// JMP_cNot %vreg15, , %PC; +// JMP_cNot %15, , %PC; // // Note: The peephole pass makes the instrucstions like -// %vreg170 = SXTW %vreg166 or %vreg16 = NOT_p %vreg15 +// %170 = SXTW %166 or %16 = NOT_p %15 // redundant and relies on some form of dead removal instructions, like // DCE or DIE to actually eliminate them. @@ -133,7 +133,7 @@ NextI = std::next(I); MachineInstr &MI = *I; // Look for sign extends: - // %vreg170 = SXTW %vreg166 + // %170 = SXTW %166 if (!DisableOptSZExt && MI.getOpcode() == Hexagon::A2_sxtw) { assert(MI.getNumOperands() == 2); MachineOperand &Dst = MI.getOperand(0); @@ -144,14 +144,14 @@ if (TargetRegisterInfo::isVirtualRegister(DstReg) && TargetRegisterInfo::isVirtualRegister(SrcReg)) { // Map the following: - // %vreg170 = SXTW %vreg166 + // %170 = SXTW %166 // PeepholeMap[170] = vreg166 PeepholeMap[DstReg] = SrcReg; } } - // Look for %vreg170 = COMBINE_ir_V4 (0, %vreg169) - // %vreg170:DoublRegs, %vreg169:IntRegs + // Look for %170 = COMBINE_ir_V4 (0, %169) + // %170:DoublRegs, %169:IntRegs if (!DisableOptExtTo64 && MI.getOpcode() == Hexagon::A4_combineir) { assert(MI.getNumOperands() == 3); MachineOperand &Dst = MI.getOperand(0); @@ -165,10 +165,10 @@ } // Look for this sequence below - // %vregDoubleReg1 = LSRd_ri %vregDoubleReg0, 32 - // %vregIntReg = COPY %vregDoubleReg1:isub_lo. + // %DoubleReg1 = LSRd_ri %DoubleReg0, 32 + // %IntReg = COPY %DoubleReg1:isub_lo. // and convert into - // %vregIntReg = COPY %vregDoubleReg0:isub_hi. + // %IntReg = COPY %DoubleReg0:isub_hi. if (MI.getOpcode() == Hexagon::S2_lsr_i_p) { assert(MI.getNumOperands() == 3); MachineOperand &Dst = MI.getOperand(0); @@ -193,14 +193,14 @@ if (TargetRegisterInfo::isVirtualRegister(DstReg) && TargetRegisterInfo::isVirtualRegister(SrcReg)) { // Map the following: - // %vreg170 = NOT_xx %vreg166 + // %170 = NOT_xx %166 // PeepholeMap[170] = vreg166 PeepholeMap[DstReg] = SrcReg; } } // Look for copy: - // %vreg176 = COPY %vreg170:isub_lo + // %176 = COPY %170:isub_lo if (!DisableOptSZExt && MI.isCopy()) { assert(MI.getNumOperands() == 2); MachineOperand &Dst = MI.getOperand(0); Index: lib/Target/Hexagon/HexagonStoreWidening.cpp =================================================================== --- lib/Target/Hexagon/HexagonStoreWidening.cpp +++ lib/Target/Hexagon/HexagonStoreWidening.cpp @@ -9,10 +9,10 @@ // Replace sequences of "narrow" stores to adjacent memory locations with // a fewer "wide" stores that have the same effect. // For example, replace: -// S4_storeirb_io %vreg100, 0, 0 ; store-immediate-byte -// S4_storeirb_io %vreg100, 1, 0 ; store-immediate-byte +// S4_storeirb_io %100, 0, 0 ; store-immediate-byte +// S4_storeirb_io %100, 1, 0 ; store-immediate-byte // with -// S4_storeirh_io %vreg100, 0, 0 ; store-immediate-halfword +// S4_storeirh_io %100, 0, 0 ; store-immediate-halfword // The above is the general idea. The actual cases handled by the code // may be a bit more complex. 
// The purpose of this pass is to reduce the number of outstanding stores, Index: lib/Target/Hexagon/HexagonSubtarget.cpp =================================================================== --- lib/Target/Hexagon/HexagonSubtarget.cpp +++ lib/Target/Hexagon/HexagonSubtarget.cpp @@ -234,12 +234,12 @@ const MachineInstr *MI = DAG->SUnits[su].getInstr(); if (MI->isCopy() && (MI->readsRegister(Hexagon::R0, &TRI) || MI->readsRegister(Hexagon::V0, &TRI))) { - // %vregX = COPY %R0 + // %X = COPY %R0 VRegHoldingRet = MI->getOperand(0).getReg(); RetRegister = MI->getOperand(1).getReg(); LastUseOfRet = nullptr; } else if (VRegHoldingRet && MI->readsVirtualRegister(VRegHoldingRet)) - // + // LastUseOfRet = &DAG->SUnits[su]; else if (LastUseOfRet && MI->definesRegister(RetRegister, &TRI)) // %R0 = ... Index: lib/Target/NVPTX/NVPTXPeephole.cpp =================================================================== --- lib/Target/NVPTX/NVPTXPeephole.cpp +++ lib/Target/NVPTX/NVPTXPeephole.cpp @@ -22,11 +22,11 @@ // This peephole pass optimizes these cases, for example // // It will transform the following pattern -// %vreg0 = LEA_ADDRi64 %VRFrame, 4 -// %vreg1 = cvta_to_local_yes_64 %vreg0 +// %0 = LEA_ADDRi64 %VRFrame, 4 +// %1 = cvta_to_local_yes_64 %0 // // into -// %vreg1 = LEA_ADDRi64 %VRFrameLocal, 4 +// %1 = LEA_ADDRi64 %VRFrameLocal, 4 // // %VRFrameLocal is the virtual register name of %SPL // Index: lib/Target/PowerPC/PPCBranchCoalescing.cpp =================================================================== --- lib/Target/PowerPC/PPCBranchCoalescing.cpp +++ lib/Target/PowerPC/PPCBranchCoalescing.cpp @@ -62,11 +62,11 @@ /// BB#0: derived from LLVM BB %entry /// Live Ins: %F1 %F3 %X6 /// -/// %vreg0 = COPY %F1; F8RC:%vreg0 -/// %vreg5 = CMPLWI %vreg4, 0; CRRC:%vreg5 GPRC:%vreg4 -/// %vreg8 = LXSDX %ZERO8, %vreg7, %RM; -/// mem:LD8[ConstantPool] F8RC:%vreg8 G8RC:%vreg7 -/// BCC 76, %vreg5, ; CRRC:%vreg5 +/// %0 = COPY %F1; F8RC:%0 +/// %5 = CMPLWI %4, 0; CRRC:%5 GPRC:%4 +/// %8 = LXSDX %ZERO8, %7, %RM; +/// mem:LD8[ConstantPool] F8RC:%8 G8RC:%7 +/// BCC 76, %5, ; CRRC:%5 /// Successors according to CFG: BB#1(?%) BB#2(?%) /// /// BB#1: derived from LLVM BB %entry @@ -75,10 +75,10 @@ /// /// BB#2: derived from LLVM BB %entry /// Predecessors according to CFG: BB#0 BB#1 -/// %vreg9 = PHI %vreg8, , %vreg0, ; -/// F8RC:%vreg9,%vreg8,%vreg0 +/// %9 = PHI %8, , %0, ; +/// F8RC:%9,%8,%0 /// -/// BCC 76, %vreg5, ; CRRC:%vreg5 +/// BCC 76, %5, ; CRRC:%5 /// Successors according to CFG: BB#3(?%) BB#4(?%) /// /// BB#3: derived from LLVM BB %entry @@ -87,8 +87,8 @@ /// /// BB#4: derived from LLVM BB %entry /// Predecessors according to CFG: BB#2 BB#3 -/// %vreg13 = PHI %vreg12, , %vreg2, ; -/// F8RC:%vreg13,%vreg12,%vreg2 +/// %13 = PHI %12, , %2, ; +/// F8RC:%13,%12,%2 /// /// BLR8 %LR8, %RM, %F1 /// @@ -100,12 +100,12 @@ /// BB#0: derived from LLVM BB %entry /// Live Ins: %F1 %F3 %X6 /// -/// %vreg0 = COPY %F1; F8RC:%vreg0 -/// %vreg5 = CMPLWI %vreg4, 0; CRRC:%vreg5 GPRC:%vreg4 -/// %vreg8 = LXSDX %ZERO8, %vreg7, %RM; -/// mem:LD8[ConstantPool] F8RC:%vreg8 G8RC:%vreg7 +/// %0 = COPY %F1; F8RC:%0 +/// %5 = CMPLWI %4, 0; CRRC:%5 GPRC:%4 +/// %8 = LXSDX %ZERO8, %7, %RM; +/// mem:LD8[ConstantPool] F8RC:%8 G8RC:%7 /// -/// BCC 76, %vreg5, ; CRRC:%vreg5 +/// BCC 76, %5, ; CRRC:%5 /// Successors according to CFG: BB#1(0x2aaaaaaa / 0x80000000 = 33.33%) /// BB#4(0x55555554 / 0x80000000 = 66.67%) /// @@ -115,10 +115,10 @@ /// /// BB#4: derived from LLVM BB %entry /// Predecessors according to CFG: BB#0 BB#1 -/// %vreg9 = 
PHI %vreg8, , %vreg0, ; -/// F8RC:%vreg9,%vreg8,%vreg0 -/// %vreg13 = PHI %vreg12, , %vreg2, ; -/// F8RC:%vreg13,%vreg12,%vreg2 +/// %9 = PHI %8, , %0, ; +/// F8RC:%9,%8,%0 +/// %13 = PHI %12, , %2, ; +/// F8RC:%13,%12,%2 /// /// BLR8 %LR8, %RM, %F1 /// Index: lib/Target/PowerPC/PPCInstrInfo.cpp =================================================================== --- lib/Target/PowerPC/PPCInstrInfo.cpp +++ lib/Target/PowerPC/PPCInstrInfo.cpp @@ -2243,7 +2243,7 @@ // ADJCALLSTACKDOWN 32, %R1, %R1 // BL8_NOP ,... // ADJCALLSTACKUP 32, 0, %R1, %R1 - // %vreg5 = COPY %X3; G8RC:%vreg5 + // %5 = COPY %X3; G8RC:%5 if (SrcReg == PPC::X3) { const MachineBasicBlock *MBB = MI.getParent(); MachineBasicBlock::const_instr_iterator II = Index: lib/Target/PowerPC/PPCMIPeephole.cpp =================================================================== --- lib/Target/PowerPC/PPCMIPeephole.cpp +++ lib/Target/PowerPC/PPCMIPeephole.cpp @@ -542,9 +542,9 @@ // We can eliminate RLDICL (e.g. for zero-extension) // if all bits to clear are already zero in the input. // This code assume following code sequence for zero-extension. - // %vreg6 = COPY %vreg5:sub_32; (optional) - // %vreg8 = IMPLICIT_DEF; - // %vreg7 = INSERT_SUBREG %vreg8, %vreg6, sub_32; + // %6 = COPY %5:sub_32; (optional) + // %8 = IMPLICIT_DEF; + // %7 = INSERT_SUBREG %8, %6, sub_32; if (!EnableZExtElimination) break; if (MI.getOperand(2).getImm() != 0) @@ -642,8 +642,8 @@ DEBUG(dbgs() << "Optimizing LI to ADDI: "); DEBUG(LiMI->dump()); - // There could be repeated registers in the PHI, e.g: %vreg1 = - // PHI %vreg6, , %vreg8, , %vreg8, ; So if we've + // There could be repeated registers in the PHI, e.g: %1 = + // PHI %6, , %8, , %8, ; So if we've // already replaced the def instruction, skip. if (LiMI->getOpcode() == PPC::ADDI || LiMI->getOpcode() == PPC::ADDI8) continue; Index: lib/Target/PowerPC/PPCVSXFMAMutate.cpp =================================================================== --- lib/Target/PowerPC/PPCVSXFMAMutate.cpp +++ lib/Target/PowerPC/PPCVSXFMAMutate.cpp @@ -90,21 +90,21 @@ // This pass is run after register coalescing, and so we're looking for // a situation like this: // ... - // %vreg5 = COPY %vreg9; VSLRC:%vreg5,%vreg9 - // %vreg5 = XSMADDADP %vreg5, %vreg17, %vreg16, - // %RM; VSLRC:%vreg5,%vreg17,%vreg16 + // %5 = COPY %9; VSLRC:%5,%9 + // %5 = XSMADDADP %5, %17, %16, + // %RM; VSLRC:%5,%17,%16 // ... - // %vreg9 = XSMADDADP %vreg9, %vreg17, %vreg19, - // %RM; VSLRC:%vreg9,%vreg17,%vreg19 + // %9 = XSMADDADP %9, %17, %19, + // %RM; VSLRC:%9,%17,%19 // ... // Where we can eliminate the copy by changing from the A-type to the // M-type instruction. Specifically, for this example, this means: - // %vreg5 = XSMADDADP %vreg5, %vreg17, %vreg16, - // %RM; VSLRC:%vreg5,%vreg17,%vreg16 + // %5 = XSMADDADP %5, %17, %16, + // %RM; VSLRC:%5,%17,%16 // is replaced by: - // %vreg16 = XSMADDMDP %vreg16, %vreg18, %vreg9, - // %RM; VSLRC:%vreg16,%vreg18,%vreg9 - // and we remove: %vreg5 = COPY %vreg9; VSLRC:%vreg5,%vreg9 + // %16 = XSMADDMDP %16, %18, %9, + // %RM; VSLRC:%16,%18,%9 + // and we remove: %5 = COPY %9; VSLRC:%5,%9 SlotIndex FMAIdx = LIS->getInstructionIndex(MI); @@ -150,13 +150,13 @@ // walking the MIs we may as well test liveness here. // // FIXME: There is a case that occurs in practice, like this: - // %vreg9 = COPY %F1; VSSRC:%vreg9 + // %9 = COPY %F1; VSSRC:%9 // ... 
- // %vreg6 = COPY %vreg9; VSSRC:%vreg6,%vreg9 - // %vreg7 = COPY %vreg9; VSSRC:%vreg7,%vreg9 - // %vreg9 = XSMADDASP %vreg9, %vreg1, %vreg4; VSSRC: - // %vreg6 = XSMADDASP %vreg6, %vreg1, %vreg2; VSSRC: - // %vreg7 = XSMADDASP %vreg7, %vreg1, %vreg3; VSSRC: + // %6 = COPY %9; VSSRC:%6,%9 + // %7 = COPY %9; VSSRC:%7,%9 + // %9 = XSMADDASP %9, %1, %4; VSSRC: + // %6 = XSMADDASP %6, %1, %2; VSSRC: + // %7 = XSMADDASP %7, %1, %3; VSSRC: // which prevents an otherwise-profitable transformation. bool OtherUsers = false, KillsAddendSrc = false; for (auto J = std::prev(I), JE = MachineBasicBlock::iterator(AddendMI); @@ -177,10 +177,10 @@ // The transformation doesn't work well with things like: - // %vreg5 = A-form-op %vreg5, %vreg11, %vreg5; + // %5 = A-form-op %5, %11, %5; // unless vreg11 is also a kill, so skip when it is not, // and check operand 3 to see it is also a kill to handle the case: - // %vreg5 = A-form-op %vreg5, %vreg5, %vreg11; + // %5 = A-form-op %5, %5, %11; // where vreg5 and vreg11 are both kills. This case would be skipped // otherwise. unsigned OldFMAReg = MI.getOperand(0).getReg(); Index: lib/Target/X86/X86ISelLowering.cpp =================================================================== --- lib/Target/X86/X86ISelLowering.cpp +++ lib/Target/X86/X86ISelLowering.cpp @@ -6967,10 +6967,10 @@ // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already // lowered this: - // (extract_vector_elt (v8f32 %vreg1), Constant<6>) + // (extract_vector_elt (v8f32 %1), Constant<6>) // to: // (extract_vector_elt (vector_shuffle<2,u,u,u> - // (extract_subvector (v8f32 %vreg0), Constant<4>), + // (extract_subvector (v8f32 %0), Constant<4>), // undef) // Constant<0>) // In this case the vector is the extract_subvector expression and the index Index: test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll =================================================================== --- test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll +++ test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll @@ -43,7 +43,7 @@ ; The key problem here is that we may fail to create an MBB referenced by a ; PHI. If so, we cannot complete the G_PHI and mustn't try or bad things ; happen. -; FALLBACK-WITH-REPORT-ERR: remark: :0:0: cannot select: G_STORE %vreg6, %vreg2; mem:ST4[%addr] GPR:%vreg6,%vreg2 (in function: pending_phis) +; FALLBACK-WITH-REPORT-ERR: remark: :0:0: cannot select: G_STORE %6, %2; mem:ST4[%addr] GPR:%6,%2 (in function: pending_phis) ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for pending_phis ; FALLBACK-WITH-REPORT-OUT-LABEL: pending_phis: define i32 @pending_phis(i1 %tst, i32 %val, i32* %addr) { @@ -63,7 +63,7 @@ } ; General legalizer inability to handle types whose size wasn't a power of 2. 
-; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %vreg1, %vreg0; mem:ST6[%addr](align=8) (in function: odd_type) +; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %1, %0; mem:ST6[%addr](align=8) (in function: odd_type) ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_type ; FALLBACK-WITH-REPORT-OUT-LABEL: odd_type: define void @odd_type(i42* %addr) { @@ -72,7 +72,7 @@ ret void } -; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %vreg1, %vreg0; mem:ST28[%addr](align=32) (in function: odd_vector) +; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %1, %0; mem:ST28[%addr](align=32) (in function: odd_vector) ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_vector ; FALLBACK-WITH-REPORT-OUT-LABEL: odd_vector: define void @odd_vector(<7 x i32>* %addr) { @@ -91,7 +91,7 @@ } ; Just to make sure we don't accidentally emit a normal load/store. -; FALLBACK-WITH-REPORT-ERR: remark: :0:0: cannot select: %vreg2(s64) = G_LOAD %vreg0; mem:LD8[%addr] GPR:%vreg2,%vreg0 (in function: atomic_ops) +; FALLBACK-WITH-REPORT-ERR: remark: :0:0: cannot select: %2(s64) = G_LOAD %0; mem:LD8[%addr] GPR:%2,%0 (in function: atomic_ops) ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for atomic_ops ; FALLBACK-WITH-REPORT-LABEL: atomic_ops: define i64 @atomic_ops(i64* %addr) { @@ -132,14 +132,14 @@ } ; Check that we fallback on invoke translation failures. -; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: %vreg0(s128) = G_FCONSTANT quad 2 +; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: %0(s128) = G_FCONSTANT quad 2 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_quad_dump ; FALLBACK-WITH-REPORT-OUT-LABEL: test_quad_dump: define fp128 @test_quad_dump() { ret fp128 0xL00000000000000004000000000000000 } -; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: %vreg0(p0) = G_EXTRACT_VECTOR_ELT %vreg1, %vreg2; (in function: vector_of_pointers_extractelement) +; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: %0(p0) = G_EXTRACT_VECTOR_ELT %1, %2; (in function: vector_of_pointers_extractelement) ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_extractelement ; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_extractelement: @var = global <2 x i16*> zeroinitializer @@ -156,7 +156,7 @@ br label %block } -; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %vreg0, %vreg4; mem:ST16[undef] (in function: vector_of_pointers_insertelement) +; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %0, %4; mem:ST16[undef] (in function: vector_of_pointers_insertelement) ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_insertelement ; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_insertelement: define void @vector_of_pointers_insertelement() { @@ -172,7 +172,7 @@ br label %block } -; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %vreg1, %vreg3; mem:ST12[undef](align=4) (in function: nonpow2_insertvalue_narrowing) +; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %1, %3; mem:ST12[undef](align=4) (in function: nonpow2_insertvalue_narrowing) ; 
FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_insertvalue_narrowing ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_insertvalue_narrowing: %struct96 = type { float, float, float } @@ -182,7 +182,7 @@ ret void } -; FALLBACK-WITH-REPORT-ERR remark: :0:0: unable to legalize instruction: G_STORE %vreg3, %vreg4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing) +; FALLBACK-WITH-REPORT-ERR remark: :0:0: unable to legalize instruction: G_STORE %3, %4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing) ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_add_narrowing ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_add_narrowing: define void @nonpow2_add_narrowing() { @@ -193,7 +193,7 @@ ret void } -; FALLBACK-WITH-REPORT-ERR remark: :0:0: unable to legalize instruction: G_STORE %vreg3, %vreg4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing) +; FALLBACK-WITH-REPORT-ERR remark: :0:0: unable to legalize instruction: G_STORE %3, %4; mem:ST12[undef](align=16) (in function: nonpow2_add_narrowing) ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_or_narrowing ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_or_narrowing: define void @nonpow2_or_narrowing() { @@ -204,7 +204,7 @@ ret void } -; FALLBACK-WITH-REPORT-ERR remark: :0:0: unable to legalize instruction: G_STORE %vreg0, %vreg1; mem:ST12[undef](align=16) (in function: nonpow2_load_narrowing) +; FALLBACK-WITH-REPORT-ERR remark: :0:0: unable to legalize instruction: G_STORE %0, %1; mem:ST12[undef](align=16) (in function: nonpow2_load_narrowing) ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_load_narrowing ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_load_narrowing: define void @nonpow2_load_narrowing() { @@ -213,7 +213,7 @@ ret void } -; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %vreg3, %vreg0; mem:ST12[%c](align=16) (in function: nonpow2_store_narrowing +; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %3, %0; mem:ST12[%c](align=16) (in function: nonpow2_store_narrowing ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_store_narrowing ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_store_narrowing: define void @nonpow2_store_narrowing(i96* %c) { @@ -223,7 +223,7 @@ ret void } -; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %vreg0, %vreg1; mem:ST12[undef](align=16) (in function: nonpow2_constant_narrowing) +; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %0, %1; mem:ST12[undef](align=16) (in function: nonpow2_constant_narrowing) ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_constant_narrowing ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_constant_narrowing: define void @nonpow2_constant_narrowing() { @@ -233,8 +233,8 @@ ; Currently can't handle vector lengths that aren't an exact multiple of ; natively supported vector lengths. Test that the fall-back works for those. 
-; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: :0:0: unable to legalize instruction: %vreg1(<7 x s64>) = G_ADD %vreg0, %vreg0; (in function: nonpow2_vector_add_fewerelements -; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: %vreg2(s64) = G_EXTRACT_VECTOR_ELT %vreg1, %vreg3; (in function: nonpow2_vector_add_fewerelements) +; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: :0:0: unable to legalize instruction: %1(<7 x s64>) = G_ADD %0, %0; (in function: nonpow2_vector_add_fewerelements +; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: %2(s64) = G_EXTRACT_VECTOR_ELT %1, %3; (in function: nonpow2_vector_add_fewerelements) ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_vector_add_fewerelements ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_vector_add_fewerelements: define void @nonpow2_vector_add_fewerelements() { Index: test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir +++ test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir @@ -9,8 +9,8 @@ ... --- # CHECK: *** Bad machine code: Generic virtual register must have a bank in a RegBankSelected function *** -# CHECK: instruction: %vreg0(s64) = COPY -# CHECK: operand 0: %vreg0 +# CHECK: instruction: %0(s64) = COPY +# CHECK: operand 0: %0 name: test regBankSelected: true registers: Index: test/CodeGen/AArch64/GlobalISel/verify-selected.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/verify-selected.mir +++ test/CodeGen/AArch64/GlobalISel/verify-selected.mir @@ -22,11 +22,11 @@ %0 = COPY %x0 ; CHECK: *** Bad machine code: Unexpected generic instruction in a Selected function *** - ; CHECK: instruction: %vreg1 = G_ADD + ; CHECK: instruction: %1 = G_ADD %1 = G_ADD %0, %0 ; CHECK: *** Bad machine code: Generic virtual register invalid in a Selected function *** - ; CHECK: instruction: %vreg2(s64) = COPY - ; CHECK: operand 0: %vreg2 + ; CHECK: instruction: %2(s64) = COPY + ; CHECK: operand 0: %2 %2(s64) = COPY %x0 ... 
Index: test/CodeGen/AArch64/aarch64-stp-cluster.ll =================================================================== --- test/CodeGen/AArch64/aarch64-stp-cluster.ll +++ test/CodeGen/AArch64/aarch64-stp-cluster.ll @@ -5,10 +5,10 @@ ; CHECK-LABEL: stp_i64_scale:BB#0 ; CHECK:Cluster ld/st SU(4) - SU(3) ; CHECK:Cluster ld/st SU(2) - SU(5) -; CHECK:SU(4): STRXui %vreg1, %vreg0, 1 -; CHECK:SU(3): STRXui %vreg1, %vreg0, 2 -; CHECK:SU(2): STRXui %vreg1, %vreg0, 3 -; CHECK:SU(5): STRXui %vreg1, %vreg0, 4 +; CHECK:SU(4): STRXui %1, %0, 1 +; CHECK:SU(3): STRXui %1, %0, 2 +; CHECK:SU(2): STRXui %1, %0, 3 +; CHECK:SU(5): STRXui %1, %0, 4 define i64 @stp_i64_scale(i64* nocapture %P, i64 %v) { entry: %arrayidx = getelementptr inbounds i64, i64* %P, i64 3 @@ -26,10 +26,10 @@ ; CHECK-LABEL: stp_i32_scale:BB#0 ; CHECK:Cluster ld/st SU(4) - SU(3) ; CHECK:Cluster ld/st SU(2) - SU(5) -; CHECK:SU(4): STRWui %vreg1, %vreg0, 1 -; CHECK:SU(3): STRWui %vreg1, %vreg0, 2 -; CHECK:SU(2): STRWui %vreg1, %vreg0, 3 -; CHECK:SU(5): STRWui %vreg1, %vreg0, 4 +; CHECK:SU(4): STRWui %1, %0, 1 +; CHECK:SU(3): STRWui %1, %0, 2 +; CHECK:SU(2): STRWui %1, %0, 3 +; CHECK:SU(5): STRWui %1, %0, 4 define i32 @stp_i32_scale(i32* nocapture %P, i32 %v) { entry: %arrayidx = getelementptr inbounds i32, i32* %P, i32 3 @@ -47,10 +47,10 @@ ; CHECK-LABEL:stp_i64_unscale:BB#0 entry ; CHECK:Cluster ld/st SU(5) - SU(2) ; CHECK:Cluster ld/st SU(4) - SU(3) -; CHECK:SU(5): STURXi %vreg1, %vreg0, -32 -; CHECK:SU(2): STURXi %vreg1, %vreg0, -24 -; CHECK:SU(4): STURXi %vreg1, %vreg0, -16 -; CHECK:SU(3): STURXi %vreg1, %vreg0, -8 +; CHECK:SU(5): STURXi %1, %0, -32 +; CHECK:SU(2): STURXi %1, %0, -24 +; CHECK:SU(4): STURXi %1, %0, -16 +; CHECK:SU(3): STURXi %1, %0, -8 define void @stp_i64_unscale(i64* nocapture %P, i64 %v) #0 { entry: %arrayidx = getelementptr inbounds i64, i64* %P, i64 -3 @@ -68,10 +68,10 @@ ; CHECK-LABEL:stp_i32_unscale:BB#0 entry ; CHECK:Cluster ld/st SU(5) - SU(2) ; CHECK:Cluster ld/st SU(4) - SU(3) -; CHECK:SU(5): STURWi %vreg1, %vreg0, -16 -; CHECK:SU(2): STURWi %vreg1, %vreg0, -12 -; CHECK:SU(4): STURWi %vreg1, %vreg0, -8 -; CHECK:SU(3): STURWi %vreg1, %vreg0, -4 +; CHECK:SU(5): STURWi %1, %0, -16 +; CHECK:SU(2): STURWi %1, %0, -12 +; CHECK:SU(4): STURWi %1, %0, -8 +; CHECK:SU(3): STURWi %1, %0, -4 define void @stp_i32_unscale(i32* nocapture %P, i32 %v) #0 { entry: %arrayidx = getelementptr inbounds i32, i32* %P, i32 -3 @@ -89,10 +89,10 @@ ; CHECK-LABEL:stp_double:BB#0 ; CHECK:Cluster ld/st SU(3) - SU(4) ; CHECK:Cluster ld/st SU(2) - SU(5) -; CHECK:SU(3): STRDui %vreg1, %vreg0, 1 -; CHECK:SU(4): STRDui %vreg1, %vreg0, 2 -; CHECK:SU(2): STRDui %vreg1, %vreg0, 3 -; CHECK:SU(5): STRDui %vreg1, %vreg0, 4 +; CHECK:SU(3): STRDui %1, %0, 1 +; CHECK:SU(4): STRDui %1, %0, 2 +; CHECK:SU(2): STRDui %1, %0, 3 +; CHECK:SU(5): STRDui %1, %0, 4 define void @stp_double(double* nocapture %P, double %v) { entry: %arrayidx = getelementptr inbounds double, double* %P, i64 3 @@ -110,10 +110,10 @@ ; CHECK-LABEL:stp_float:BB#0 ; CHECK:Cluster ld/st SU(3) - SU(4) ; CHECK:Cluster ld/st SU(2) - SU(5) -; CHECK:SU(3): STRSui %vreg1, %vreg0, 1 -; CHECK:SU(4): STRSui %vreg1, %vreg0, 2 -; CHECK:SU(2): STRSui %vreg1, %vreg0, 3 -; CHECK:SU(5): STRSui %vreg1, %vreg0, 4 +; CHECK:SU(3): STRSui %1, %0, 1 +; CHECK:SU(4): STRSui %1, %0, 2 +; CHECK:SU(2): STRSui %1, %0, 3 +; CHECK:SU(5): STRSui %1, %0, 4 define void @stp_float(float* nocapture %P, float %v) { entry: %arrayidx = getelementptr inbounds float, float* %P, i64 3 @@ -130,10 +130,10 @@ ; CHECK: ********** MI 
Scheduling ********** ; CHECK-LABEL: stp_volatile:BB#0 ; CHECK-NOT: Cluster ld/st -; CHECK:SU(2): STRXui %vreg1, %vreg0, 3; mem:Volatile -; CHECK:SU(3): STRXui %vreg1, %vreg0, 2; mem:Volatile -; CHECK:SU(4): STRXui %vreg1, %vreg0, 1; mem:Volatile -; CHECK:SU(5): STRXui %vreg1, %vreg0, 4; mem:Volatile +; CHECK:SU(2): STRXui %1, %0, 3; mem:Volatile +; CHECK:SU(3): STRXui %1, %0, 2; mem:Volatile +; CHECK:SU(4): STRXui %1, %0, 1; mem:Volatile +; CHECK:SU(5): STRXui %1, %0, 4; mem:Volatile define i64 @stp_volatile(i64* nocapture %P, i64 %v) { entry: %arrayidx = getelementptr inbounds i64, i64* %P, i64 3 Index: test/CodeGen/AArch64/arm64-fast-isel-rem.ll =================================================================== --- test/CodeGen/AArch64/arm64-fast-isel-rem.ll +++ test/CodeGen/AArch64/arm64-fast-isel-rem.ll @@ -4,9 +4,9 @@ ; CHECK-SSA-LABEL: Machine code for function t1 -; CHECK-SSA: [[QUOTREG:%vreg[0-9]+]] = SDIVWr +; CHECK-SSA: [[QUOTREG:%[0-9]+]] = SDIVWr ; CHECK-SSA-NOT: [[QUOTREG]] = -; CHECK-SSA: {{%vreg[0-9]+}} = MSUBWrrr [[QUOTREG]] +; CHECK-SSA: {{%[0-9]+}} = MSUBWrrr [[QUOTREG]] ; CHECK-SSA-LABEL: Machine code for function t2 Index: test/CodeGen/AArch64/arm64-ldp-cluster.ll =================================================================== --- test/CodeGen/AArch64/arm64-ldp-cluster.ll +++ test/CodeGen/AArch64/arm64-ldp-cluster.ll @@ -6,13 +6,13 @@ ; CHECK: ********** MI Scheduling ********** ; CHECK-LABEL: ldr_int:BB#0 ; CHECK: Cluster ld/st SU(1) - SU(2) -; CHECK: SU(1): %vreg{{[0-9]+}} = LDRWui -; CHECK: SU(2): %vreg{{[0-9]+}} = LDRWui +; CHECK: SU(1): %{{[0-9]+}} = LDRWui +; CHECK: SU(2): %{{[0-9]+}} = LDRWui ; EXYNOS: ********** MI Scheduling ********** ; EXYNOS-LABEL: ldr_int:BB#0 ; EXYNOS: Cluster ld/st SU(1) - SU(2) -; EXYNOS: SU(1): %vreg{{[0-9]+}} = LDRWui -; EXYNOS: SU(2): %vreg{{[0-9]+}} = LDRWui +; EXYNOS: SU(1): %{{[0-9]+}} = LDRWui +; EXYNOS: SU(2): %{{[0-9]+}} = LDRWui define i32 @ldr_int(i32* %a) nounwind { %p1 = getelementptr inbounds i32, i32* %a, i32 1 %tmp1 = load i32, i32* %p1, align 2 @@ -26,13 +26,13 @@ ; CHECK: ********** MI Scheduling ********** ; CHECK-LABEL: ldp_sext_int:BB#0 ; CHECK: Cluster ld/st SU(1) - SU(2) -; CHECK: SU(1): %vreg{{[0-9]+}} = LDRSWui -; CHECK: SU(2): %vreg{{[0-9]+}} = LDRSWui +; CHECK: SU(1): %{{[0-9]+}} = LDRSWui +; CHECK: SU(2): %{{[0-9]+}} = LDRSWui ; EXYNOS: ********** MI Scheduling ********** ; EXYNOS-LABEL: ldp_sext_int:BB#0 ; EXYNOS: Cluster ld/st SU(1) - SU(2) -; EXYNOS: SU(1): %vreg{{[0-9]+}} = LDRSWui -; EXYNOS: SU(2): %vreg{{[0-9]+}} = LDRSWui +; EXYNOS: SU(1): %{{[0-9]+}} = LDRSWui +; EXYNOS: SU(2): %{{[0-9]+}} = LDRSWui define i64 @ldp_sext_int(i32* %p) nounwind { %tmp = load i32, i32* %p, align 4 %add.ptr = getelementptr inbounds i32, i32* %p, i64 1 @@ -47,13 +47,13 @@ ; CHECK: ********** MI Scheduling ********** ; CHECK-LABEL: ldur_int:BB#0 ; CHECK: Cluster ld/st SU(2) - SU(1) -; CHECK: SU(1): %vreg{{[0-9]+}} = LDURWi -; CHECK: SU(2): %vreg{{[0-9]+}} = LDURWi +; CHECK: SU(1): %{{[0-9]+}} = LDURWi +; CHECK: SU(2): %{{[0-9]+}} = LDURWi ; EXYNOS: ********** MI Scheduling ********** ; EXYNOS-LABEL: ldur_int:BB#0 ; EXYNOS: Cluster ld/st SU(2) - SU(1) -; EXYNOS: SU(1): %vreg{{[0-9]+}} = LDURWi -; EXYNOS: SU(2): %vreg{{[0-9]+}} = LDURWi +; EXYNOS: SU(1): %{{[0-9]+}} = LDURWi +; EXYNOS: SU(2): %{{[0-9]+}} = LDURWi define i32 @ldur_int(i32* %a) nounwind { %p1 = getelementptr inbounds i32, i32* %a, i32 -1 %tmp1 = load i32, i32* %p1, align 2 @@ -67,13 +67,13 @@ ; CHECK: ********** MI Scheduling ********** ; CHECK-LABEL: 
ldp_half_sext_zext_int:BB#0 ; CHECK: Cluster ld/st SU(3) - SU(4) -; CHECK: SU(3): %vreg{{[0-9]+}} = LDRSWui -; CHECK: SU(4): %vreg{{[0-9]+}}:sub_32 = LDRWui +; CHECK: SU(3): %{{[0-9]+}} = LDRSWui +; CHECK: SU(4): %{{[0-9]+}}:sub_32 = LDRWui ; EXYNOS: ********** MI Scheduling ********** ; EXYNOS-LABEL: ldp_half_sext_zext_int:BB#0 ; EXYNOS: Cluster ld/st SU(3) - SU(4) -; EXYNOS: SU(3): %vreg{{[0-9]+}} = LDRSWui -; EXYNOS: SU(4): %vreg{{[0-9]+}}:sub_32 = LDRWui +; EXYNOS: SU(3): %{{[0-9]+}} = LDRSWui +; EXYNOS: SU(4): %{{[0-9]+}}:sub_32 = LDRWui define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind { %tmp0 = load i64, i64* %q, align 4 %tmp = load i32, i32* %p, align 4 @@ -90,13 +90,13 @@ ; CHECK: ********** MI Scheduling ********** ; CHECK-LABEL: ldp_half_zext_sext_int:BB#0 ; CHECK: Cluster ld/st SU(3) - SU(4) -; CHECK: SU(3): %vreg{{[0-9]+}}:sub_32 = LDRWui -; CHECK: SU(4): %vreg{{[0-9]+}} = LDRSWui +; CHECK: SU(3): %{{[0-9]+}}:sub_32 = LDRWui +; CHECK: SU(4): %{{[0-9]+}} = LDRSWui ; EXYNOS: ********** MI Scheduling ********** ; EXYNOS-LABEL: ldp_half_zext_sext_int:BB#0 ; EXYNOS: Cluster ld/st SU(3) - SU(4) -; EXYNOS: SU(3): %vreg{{[0-9]+}}:sub_32 = LDRWui -; EXYNOS: SU(4): %vreg{{[0-9]+}} = LDRSWui +; EXYNOS: SU(3): %{{[0-9]+}}:sub_32 = LDRWui +; EXYNOS: SU(4): %{{[0-9]+}} = LDRSWui define i64 @ldp_half_zext_sext_int(i64* %q, i32* %p) nounwind { %tmp0 = load i64, i64* %q, align 4 %tmp = load i32, i32* %p, align 4 @@ -113,13 +113,13 @@ ; CHECK: ********** MI Scheduling ********** ; CHECK-LABEL: ldr_int_volatile:BB#0 ; CHECK-NOT: Cluster ld/st -; CHECK: SU(1): %vreg{{[0-9]+}} = LDRWui -; CHECK: SU(2): %vreg{{[0-9]+}} = LDRWui +; CHECK: SU(1): %{{[0-9]+}} = LDRWui +; CHECK: SU(2): %{{[0-9]+}} = LDRWui ; EXYNOS: ********** MI Scheduling ********** ; EXYNOS-LABEL: ldr_int_volatile:BB#0 ; EXYNOS-NOT: Cluster ld/st -; EXYNOS: SU(1): %vreg{{[0-9]+}} = LDRWui -; EXYNOS: SU(2): %vreg{{[0-9]+}} = LDRWui +; EXYNOS: SU(1): %{{[0-9]+}} = LDRWui +; EXYNOS: SU(2): %{{[0-9]+}} = LDRWui define i32 @ldr_int_volatile(i32* %a) nounwind { %p1 = getelementptr inbounds i32, i32* %a, i32 1 %tmp1 = load volatile i32, i32* %p1, align 2 @@ -133,8 +133,8 @@ ; CHECK: ********** MI Scheduling ********** ; CHECK-LABEL: ldq_cluster:BB#0 ; CHECK: Cluster ld/st SU(1) - SU(3) -; CHECK: SU(1): %vreg{{[0-9]+}} = LDRQui -; CHECK: SU(3): %vreg{{[0-9]+}} = LDRQui +; CHECK: SU(1): %{{[0-9]+}} = LDRQui +; CHECK: SU(3): %{{[0-9]+}} = LDRQui ; EXYNOS: ********** MI Scheduling ********** ; EXYNOS-LABEL: ldq_cluster:BB#0 ; EXYNOS-NOT: Cluster ld/st Index: test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll =================================================================== --- test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll +++ test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll @@ -6,10 +6,10 @@ ; ; CHECK: ********** MI Scheduling ********** ; CHECK: shiftable -; CHECK: SU(2): %vreg2 = SUBXri %vreg1, 20, 0 +; CHECK: SU(2): %2 = SUBXri %1, 20, 0 ; CHECK: Successors: -; CHECK-NEXT: SU(4): Data Latency=1 Reg=%vreg2 -; CHECK-NEXT: SU(3): Data Latency=2 Reg=%vreg2 +; CHECK-NEXT: SU(4): Data Latency=1 Reg=%2 +; CHECK-NEXT: SU(3): Data Latency=2 Reg=%2 ; CHECK: ********** INTERVALS ********** define i64 @shiftable(i64 %A, i64 %B) { %tmp0 = sub i64 %B, 20 Index: test/CodeGen/AArch64/arm64-misched-memdep-bug.ll =================================================================== --- test/CodeGen/AArch64/arm64-misched-memdep-bug.ll +++ test/CodeGen/AArch64/arm64-misched-memdep-bug.ll @@ -5,15 +5,15 @@ ; ; CHECK: ********** MI 
Scheduling ********** ; CHECK: misched_bug:BB#0 entry -; CHECK: SU(2): %vreg2 = LDRWui %vreg0, 1; mem:LD4[%ptr1_plus1] GPR32:%vreg2 GPR64common:%vreg0 +; CHECK: SU(2): %2 = LDRWui %0, 1; mem:LD4[%ptr1_plus1] GPR32:%2 GPR64common:%0 ; CHECK: Successors: -; CHECK-NEXT: SU(5): Data Latency=4 Reg=%vreg2 +; CHECK-NEXT: SU(5): Data Latency=4 Reg=%2 ; CHECK-NEXT: SU(4): Ord Latency=0 -; CHECK: SU(3): STRWui %wzr, %vreg0, 0; mem:ST4[%ptr1] GPR64common:%vreg0 +; CHECK: SU(3): STRWui %wzr, %0, 0; mem:ST4[%ptr1] GPR64common:%0 ; CHECK: Successors: ; CHECK: SU(4): Ord Latency=0 -; CHECK: SU(4): STRWui %wzr, %vreg1, 0; mem:ST4[%ptr2] GPR64common:%vreg1 -; CHECK: SU(5): %w0 = COPY %vreg2; GPR32:%vreg2 +; CHECK: SU(4): STRWui %wzr, %1, 0; mem:ST4[%ptr2] GPR64common:%1 +; CHECK: SU(5): %w0 = COPY %2; GPR32:%2 ; CHECK: ** ScheduleDAGMI::schedule picking next node define i32 @misched_bug(i32* %ptr1, i32* %ptr2) { entry: Index: test/CodeGen/AArch64/tailcall_misched_graph.ll =================================================================== --- test/CodeGen/AArch64/tailcall_misched_graph.ll +++ test/CodeGen/AArch64/tailcall_misched_graph.ll @@ -26,9 +26,9 @@ ; CHECK: fi#-2: {{.*}} fixed, at location [SP+8] ; CHECK: fi#-1: {{.*}} fixed, at location [SP] -; CHECK: [[VRA:%vreg.*]] = LDRXui -; CHECK: [[VRB:%vreg.*]] = LDRXui -; CHECK: STRXui %vreg{{.*}}, +; CHECK: [[VRA:%.*]] = LDRXui +; CHECK: [[VRB:%.*]] = LDRXui +; CHECK: STRXui %{{.*}}, ; CHECK: STRXui [[VRB]], ; Make sure that there is an dependence edge between fi#-2 and fi#-4. @@ -40,5 +40,5 @@ ; CHECK: SU([[DEPSTOREB:.*]]): Ord Latency=0 ; CHECK: SU([[DEPSTOREA:.*]]): Ord Latency=0 -; CHECK: SU([[DEPSTOREA]]): STRXui %vreg{{.*}}, -; CHECK: SU([[DEPSTOREB]]): STRXui %vreg{{.*}}, +; CHECK: SU([[DEPSTOREA]]): STRXui %{{.*}}, +; CHECK: SU([[DEPSTOREB]]): STRXui %{{.*}}, Index: test/CodeGen/AMDGPU/lds-output-queue.ll =================================================================== --- test/CodeGen/AMDGPU/lds-output-queue.ll +++ test/CodeGen/AMDGPU/lds-output-queue.ll @@ -46,20 +46,20 @@ ; ; The instruction selection phase will generate ISA that looks like this: ; %OQAP = LDS_READ_RET -; %vreg0 = MOV %OQAP -; %vreg1 = VTX_READ_32 -; %vreg2 = ADD_INT %vreg1, %vreg0 +; %0 = MOV %OQAP +; %1 = VTX_READ_32 +; %2 = ADD_INT %1, %0 ; ; The bottom scheduler will schedule the two ALU instructions first: ; ; UNSCHEDULED: ; %OQAP = LDS_READ_RET -; %vreg1 = VTX_READ_32 +; %1 = VTX_READ_32 ; ; SCHEDULED: ; ; vreg0 = MOV %OQAP -; vreg2 = ADD_INT %vreg1, %vreg2 +; vreg2 = ADD_INT %1, %2 ; ; The lack of proper aliasing results in the local memory read (LDS_READ_RET) ; to consider the global memory read (VTX_READ_32) has a chain dependency, so @@ -69,10 +69,10 @@ ; Alu clause: ; %OQAP = LDS_READ_RET ; VTX clause: -; %vreg1 = VTX_READ_32 +; %1 = VTX_READ_32 ; Alu clause: ; vreg0 = MOV %OQAP -; vreg2 = ADD_INT %vreg1, %vreg2 +; vreg2 = ADD_INT %1, %2 ; ; This is an illegal program because the OQAP def and use know occur in ; different ALU clauses. Index: test/CodeGen/AMDGPU/liveness.mir =================================================================== --- test/CodeGen/AMDGPU/liveness.mir +++ test/CodeGen/AMDGPU/liveness.mir @@ -6,7 +6,7 @@ # liveranges needed it. # # Should see three distinct value numbers: -# CHECK: %vreg0 [{{.*}}:0)[{{.*}}:1)[{{.*}}:2) 0@{{[0-9]+[Berd]}} 1@{{[0-9]+[Berd]}} 2@{{[0-9]+B-phi}} +# CHECK: %0 [{{.*}}:0)[{{.*}}:1)[{{.*}}:2) 0@{{[0-9]+[Berd]}} 1@{{[0-9]+[Berd]}} 2@{{[0-9]+B-phi}} --- | define amdgpu_kernel void @test0() { ret void } ... 
Index: test/CodeGen/AMDGPU/spill-empty-live-interval.mir =================================================================== --- test/CodeGen/AMDGPU/spill-empty-live-interval.mir +++ test/CodeGen/AMDGPU/spill-empty-live-interval.mir @@ -2,7 +2,7 @@ # https://bugs.llvm.org/show_bug.cgi?id=33620 --- -# This would assert due to the empty live interval created for %vreg9 +# This would assert due to the empty live interval created for %9 # on the last S_NOP with an undef subreg use. # CHECK-LABEL: name: expecting_non_empty_interval Index: test/CodeGen/AMDGPU/subreg-intervals.mir =================================================================== --- test/CodeGen/AMDGPU/subreg-intervals.mir +++ test/CodeGen/AMDGPU/subreg-intervals.mir @@ -2,11 +2,11 @@ # REQUIRES: asserts # CHECK: INTERVALS -# CHECK: vreg0 +# CHECK: %0 # CHECK-LABEL: Machine code for function test0: # CHECK: INTERVALS -# CHECK: vreg0 +# CHECK: %0 # CHECK-LABEL: Machine code for function test1: --- | Index: test/CodeGen/ARM/2011-11-14-EarlyClobber.ll =================================================================== --- test/CodeGen/ARM/2011-11-14-EarlyClobber.ll +++ test/CodeGen/ARM/2011-11-14-EarlyClobber.ll @@ -5,11 +5,11 @@ ; This test calls shrinkToUses with an early-clobber redefined live range during ; spilling. ; -; Shrink: %vreg47,1.158257e-02 = [384r,400e:0)[400e,420r:1) 0@384r 1@400e +; Shrink: %47,1.158257e-02 = [384r,400e:0)[400e,420r:1) 0@384r 1@400e ; ; The early-clobber instruction is an str: ; -; %vreg12 = t2STR_PRE %vreg6, %vreg12, 32, pred:14, pred:_ +; %12 = t2STR_PRE %6, %12, 32, pred:14, pred:_ ; ; This tests that shrinkToUses handles the EC redef correctly. Index: test/CodeGen/ARM/Windows/dbzchk.ll =================================================================== --- test/CodeGen/ARM/Windows/dbzchk.ll +++ test/CodeGen/ARM/Windows/dbzchk.ll @@ -119,7 +119,7 @@ ; CHECK-CFG-DAG: t2B ; CHECK-CFG-DAG: BB#2 -; CHECK-CFG-DAG: tCMPi8 %vreg{{[0-9]}}, 0 +; CHECK-CFG-DAG: tCMPi8 %{{[0-9]}}, 0 ; CHECK-CFG-DAG: t2Bcc ; CHECK-CFG-DAG: BB#4 Index: test/CodeGen/ARM/crash-greedy.ll =================================================================== --- test/CodeGen/ARM/crash-greedy.ll +++ test/CodeGen/ARM/crash-greedy.ll @@ -61,7 +61,7 @@ ; CHECK: insert_elem ; This test has a sub-register copy with a kill flag: -; %vreg6:ssub_3 = COPY %vreg6:ssub_2; QPR_VFP2:%vreg6 +; %6:ssub_3 = COPY %6:ssub_2; QPR_VFP2:%6 ; The rewriter must do something sensible with that, or the scavenger crashes. define void @insert_elem() nounwind { entry: Index: test/CodeGen/ARM/misched-copy-arm.ll =================================================================== --- test/CodeGen/ARM/misched-copy-arm.ll +++ test/CodeGen/ARM/misched-copy-arm.ll @@ -33,9 +33,9 @@ ; This case was a crasher in constrainLocalCopy. ; The problem was the t2LDR_PRE defining both the global and local lrg. 
; CHECK-LABEL: *** Final schedule for BB#5 *** -; CHECK: %[[R4:vreg[0-9]+]], %[[R1:vreg[0-9]+]] = t2LDR_PRE %[[R1]] -; CHECK: %vreg{{[0-9]+}} = COPY %[[R1]] -; CHECK: %vreg{{[0-9]+}} = COPY %[[R4]] +; CHECK: %[[R4:[0-9]+]], %[[R1:[0-9]+]] = t2LDR_PRE %[[R1]] +; CHECK: %{{[0-9]+}} = COPY %[[R1]] +; CHECK: %{{[0-9]+}} = COPY %[[R4]] ; CHECK-LABEL: MACHINEINSTRS %struct.rtx_def = type { [4 x i8], [1 x %union.rtunion_def] } %union.rtunion_def = type { i64 } Index: test/CodeGen/ARM/misched-int-basic-thumb2.mir =================================================================== --- test/CodeGen/ARM/misched-int-basic-thumb2.mir +++ test/CodeGen/ARM/misched-int-basic-thumb2.mir @@ -37,62 +37,62 @@ } # # CHECK: ********** MI Scheduling ********** -# CHECK: SU(2): %vreg2 = t2MOVi32imm ; rGPR:%vreg2 +# CHECK: SU(2): %2 = t2MOVi32imm ; rGPR:%2 # CHECK_A9: Latency : 2 # CHECK_SWIFT: Latency : 2 # CHECK_R52: Latency : 2 # -# CHECK: SU(3): %vreg3 = t2LDRi12 %vreg2, 0, pred:14, pred:_; mem:LD4[@g1](dereferenceable) rGPR:%vreg3,%vreg2 +# CHECK: SU(3): %3 = t2LDRi12 %2, 0, pred:14, pred:_; mem:LD4[@g1](dereferenceable) rGPR:%3,%2 # CHECK_A9: Latency : 1 # CHECK_SWIFT: Latency : 3 # CHECK_R52: Latency : 4 # -# CHECK : SU(6): %vreg6 = t2ADDrr %vreg3, %vreg3, pred:14, pred:_, opt:_; rGPR:%vreg6,%vreg3,%vreg3 +# CHECK : SU(6): %6 = t2ADDrr %3, %3, pred:14, pred:_, opt:_; rGPR:%6,%3,%3 # CHECK_A9: Latency : 1 # CHECK_SWIFT: Latency : 1 # CHECK_R52: Latency : 3 -# CHECK: SU(7): %vreg7 = t2SDIV %vreg6, %vreg5, pred:14, pred:_; rGPR:%vreg7,%vreg6,%vreg5 +# CHECK: SU(7): %7 = t2SDIV %6, %5, pred:14, pred:_; rGPR:%7,%6,%5 # CHECK_A9: Latency : 0 # CHECK_SWIFT: Latency : 14 # CHECK_R52: Latency : 8 -# CHECK: SU(8): t2STRi12 %vreg7, %vreg2, 0, pred:14, pred:_; mem:ST4[@g1] rGPR:%vreg7,%vreg2 +# CHECK: SU(8): t2STRi12 %7, %2, 0, pred:14, pred:_; mem:ST4[@g1] rGPR:%7,%2 # CHECK_A9: Latency : 1 # CHECK_SWIFT: Latency : 0 # CHECK_R52: Latency : 4 # -# CHECK: SU(9): %vreg8 = t2SMULBB %vreg1, %vreg1, pred:14, pred:_; rGPR:%vreg8,%vreg1,%vreg1 +# CHECK: SU(9): %8 = t2SMULBB %1, %1, pred:14, pred:_; rGPR:%8,%1,%1 # CHECK_A9: Latency : 2 # CHECK_SWIFT: Latency : 4 # CHECK_R52: Latency : 4 # -# CHECK: SU(10): %vreg9 = t2SMLABB %vreg0, %vreg0, %vreg8, pred:14, pred:_; rGPR:%vreg9,%vreg0,%vreg0,%vreg8 +# CHECK: SU(10): %9 = t2SMLABB %0, %0, %8, pred:14, pred:_; rGPR:%9,%0,%0,%8 # CHECK_A9: Latency : 2 # CHECK_SWIFT: Latency : 4 # CHECK_R52: Latency : 4 # -# CHECK: SU(11): %vreg10 = t2UXTH %vreg9, 0, pred:14, pred:_; rGPR:%vreg10,%vreg9 +# CHECK: SU(11): %10 = t2UXTH %9, 0, pred:14, pred:_; rGPR:%10,%9 # CHECK_A9: Latency : 1 # CHECK_SWIFT: Latency : 1 # CHECK_R52: Latency : 3 # -# CHECK: SU(12): %vreg11 = t2MUL %vreg10, %vreg7, pred:14, pred:_; rGPR:%vreg11,%vreg10,%vreg7 +# CHECK: SU(12): %11 = t2MUL %10, %7, pred:14, pred:_; rGPR:%11,%10,%7 # CHECK_A9: Latency : 2 # CHECK_SWIFT: Latency : 4 # CHECK_R52: Latency : 4 # -# CHECK: SU(13): %vreg12 = t2MLA %vreg11, %vreg11, %vreg11, pred:14, pred:_; rGPR:%vreg12,%vreg11,%vreg11,%vreg11 +# CHECK: SU(13): %12 = t2MLA %11, %11, %11, pred:14, pred:_; rGPR:%12,%11,%11,%11 # CHECK_A9: Latency : 2 # CHECK_SWIFT: Latency : 4 # CHECK_R52: Latency : 4 # -# CHECK: SU(14): %vreg13, %vreg14 = t2UMULL %vreg12, %vreg12, pred:14, pred:_; rGPR:%vreg13,%vreg14,%vreg12,%vreg12 +# CHECK: SU(14): %13, %14 = t2UMULL %12, %12, pred:14, pred:_; rGPR:%13,%14,%12,%12 # CHECK_A9: Latency : 3 # CHECK_SWIFT: Latency : 5 # CHECK_R52: Latency : 4 # -# CHECK: SU(18): %vreg19, %vreg20 = t2UMLAL %vreg12, %vreg12, 
%vreg19, %vreg20, pred:14, pred:_; rGPR:%vreg19,%vreg20,%vreg12,%vreg12,%vreg20 +# CHECK: SU(18): %19, %20 = t2UMLAL %12, %12, %19, %20, pred:14, pred:_; rGPR:%19,%20,%12,%12,%20 # CHECK_A9: Latency : 3 # CHECK_SWIFT: Latency : 7 # CHECK_R52: Latency : 4 Index: test/CodeGen/ARM/misched-int-basic.mir =================================================================== --- test/CodeGen/ARM/misched-int-basic.mir +++ test/CodeGen/ARM/misched-int-basic.mir @@ -28,37 +28,37 @@ } # CHECK: ********** MI Scheduling ********** -# CHECK: SU(2): %vreg2 = SMULBB %vreg1, %vreg1, pred:14, pred:_; GPR:%vreg2,%vreg1,%vreg1 +# CHECK: SU(2): %2 = SMULBB %1, %1, pred:14, pred:_; GPR:%2,%1,%1 # CHECK_A9: Latency : 2 # CHECK_SWIFT: Latency : 4 # CHECK_R52: Latency : 4 # -# CHECK: SU(3): %vreg3 = SMLABB %vreg0, %vreg0, %vreg2, pred:14, pred:_; GPRnopc:%vreg3,%vreg0,%vreg0 GPR:%vreg2 +# CHECK: SU(3): %3 = SMLABB %0, %0, %2, pred:14, pred:_; GPRnopc:%3,%0,%0 GPR:%2 # CHECK_A9: Latency : 2 # CHECK_SWIFT: Latency : 4 # CHECK_R52: Latency : 4 # -# CHECK: SU(4): %vreg4 = UXTH %vreg3, 0, pred:14, pred:_; GPRnopc:%vreg4,%vreg3 +# CHECK: SU(4): %4 = UXTH %3, 0, pred:14, pred:_; GPRnopc:%4,%3 # CHECK_A9: Latency : 1 # CHECK_SWIFT: Latency : 1 # CHECK_R52: Latency : 3 # -# CHECK: SU(5): %vreg5 = MUL %vreg4, %vreg4, pred:14, pred:_, opt:_; GPRnopc:%vreg5,%vreg4,%vreg4 +# CHECK: SU(5): %5 = MUL %4, %4, pred:14, pred:_, opt:_; GPRnopc:%5,%4,%4 # CHECK_A9: Latency : 2 # CHECK_SWIFT: Latency : 4 # CHECK_R52: Latency : 4 # -# CHECK: SU(6): %vreg6 = MLA %vreg5, %vreg5, %vreg5, pred:14, pred:_, opt:_; GPRnopc:%vreg6,%vreg5,%vreg5,%vreg5 +# CHECK: SU(6): %6 = MLA %5, %5, %5, pred:14, pred:_, opt:_; GPRnopc:%6,%5,%5,%5 # CHECK_A9: Latency : 2 # CHECK_SWIFT: Latency : 4 # CHECK_R52: Latency : 4 # -# CHECK: SU(7): %vreg7, %vreg8 = UMULL %vreg6, %vreg6, pred:14, pred:_, opt:_; GPRnopc:%vreg7,%vreg8,%vreg6,%vreg6 +# CHECK: SU(7): %7, %8 = UMULL %6, %6, pred:14, pred:_, opt:_; GPRnopc:%7,%8,%6,%6 # CHECK_A9: Latency : 3 # CHECK_SWIFT: Latency : 5 # CHECK_R52: Latency : 4 # -# CHECK: SU(11): %vreg13, %vreg14 = UMLAL %vreg6, %vreg6, %vreg13, %vreg14, pred:14, pred:_, opt:_; GPR:%vreg13 GPRnopc:%vreg14,%vreg6,%vreg6 +# CHECK: SU(11): %13, %14 = UMLAL %6, %6, %13, %14, pred:14, pred:_, opt:_; GPR:%13 GPRnopc:%14,%6,%6 # CHECK_SWIFT: Latency : 7 # CHECK_A9: Latency : 3 # CHECK_R52: Latency : 4 Index: test/CodeGen/ARM/single-issue-r52.mir =================================================================== --- test/CodeGen/ARM/single-issue-r52.mir +++ test/CodeGen/ARM/single-issue-r52.mir @@ -20,22 +20,22 @@ # CHECK: ********** MI Scheduling ********** # CHECK: ScheduleDAGMILive::schedule starting -# CHECK: SU(1): %vreg1 = VLD4d8Pseudo %vreg0, 8, pred:14, pred:_; mem:LD32[%A](align=8) QQPR:%vreg1 GPR:%vreg0 +# CHECK: SU(1): %1 = VLD4d8Pseudo %0, 8, pred:14, pred:_; mem:LD32[%A](align=8) QQPR:%1 GPR:%0 # CHECK: Latency : 8 # CHECK: Single Issue : true; -# CHECK: SU(2): %vreg4 = VADDv8i8 %vreg1:dsub_0, %vreg1:dsub_1, pred:14, pred:_; DPR:%vreg4 QQPR:%vreg1 +# CHECK: SU(2): %4 = VADDv8i8 %1:dsub_0, %1:dsub_1, pred:14, pred:_; DPR:%4 QQPR:%1 # CHECK: Latency : 5 # CHECK: Single Issue : false; -# CHECK: SU(3): %vreg5, %vreg6 = VMOVRRD %vreg4, pred:14, pred:_; GPR:%vreg5,%vreg6 DPR:%vreg4 +# CHECK: SU(3): %5, %6 = VMOVRRD %4, pred:14, pred:_; GPR:%5,%6 DPR:%4 # CHECK: Latency : 4 # CHECK: Single Issue : false; -# TOPDOWN: Scheduling SU(1) %vreg1 = VLD4d8Pseudo +# TOPDOWN: Scheduling SU(1) %1 = VLD4d8Pseudo # TOPDOWN: Bump cycle to end group -# 
TOPDOWN: Scheduling SU(2) %vreg4 = VADDv8i8 +# TOPDOWN: Scheduling SU(2) %4 = VADDv8i8 -# BOTTOMUP: Scheduling SU(2) %vreg4 = VADDv8i8 -# BOTTOMUP: Scheduling SU(1) %vreg1 = VLD4d8Pseudo +# BOTTOMUP: Scheduling SU(2) %4 = VADDv8i8 +# BOTTOMUP: Scheduling SU(1) %1 = VLD4d8Pseudo # BOTTOMUP: Bump cycle to begin group ... Index: test/CodeGen/ARM/subreg-remat.ll =================================================================== --- test/CodeGen/ARM/subreg-remat.ll +++ test/CodeGen/ARM/subreg-remat.ll @@ -4,10 +4,10 @@ ; ; The vector %v2 is built like this: ; -; %vreg6:ssub_1 = ... -; %vreg6:ssub_0 = VLDRS , 0, pred:14, pred:_; mem:LD4[ConstantPool] DPR_VFP2:%vreg6 +; %6:ssub_1 = ... +; %6:ssub_0 = VLDRS , 0, pred:14, pred:_; mem:LD4[ConstantPool] DPR_VFP2:%6 ; -; When %vreg6 spills, the VLDRS constant pool load cannot be rematerialized +; When %6 spills, the VLDRS constant pool load cannot be rematerialized ; since it implicitly reads the ssub_1 sub-register. ; ; CHECK: f1 @@ -31,7 +31,7 @@ ; because the bits are undef, we should rematerialize. The vector is now built ; like this: ; -; %vreg2:ssub_0 = VLDRS , 0, pred:14, pred:_, %vreg2; mem:LD4[ConstantPool] +; %2:ssub_0 = VLDRS , 0, pred:14, pred:_, %2; mem:LD4[ConstantPool] ; ; The extra operand indicates that the instruction fully defines the ; virtual register. It doesn't read the old value. Index: test/CodeGen/Hexagon/circ_ldd_bug.ll =================================================================== --- test/CodeGen/Hexagon/circ_ldd_bug.ll +++ test/CodeGen/Hexagon/circ_ldd_bug.ll @@ -7,9 +7,9 @@ ; UNREACHABLE executed at llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp:615! ; This happened because after unrolling a loop with a ldd_circ instruction we ; would have several TFCR and ldd_circ instruction sequences. -; %vreg0 (CRRegs) = TFCR %vreg0 (IntRegs) +; %0 (CRRegs) = TFCR %0 (IntRegs) ; = ldd_circ( , , vreg0) -; %vreg1 (CRRegs) = TFCR %vreg1 (IntRegs) +; %1 (CRRegs) = TFCR %1 (IntRegs) ; = ldd_circ( , , vreg0) ; The scheduler would move the CRRegs to the top of the loop. The allocator ; would try to spill the CRRegs after running out of them. We don't have code to Index: test/CodeGen/Hexagon/post-inc-aa-metadata.ll =================================================================== --- test/CodeGen/Hexagon/post-inc-aa-metadata.ll +++ test/CodeGen/Hexagon/post-inc-aa-metadata.ll @@ -3,7 +3,7 @@ ; Check that the generated post-increment load has TBAA information. 
; CHECK-LABEL: Machine code for function fred: -; CHECK: = V6_vL32b_pi %vreg{{[0-9]+}}, 64; mem:LD64[{{.*}}](tbaa= +; CHECK: = V6_vL32b_pi %{{[0-9]+}}, 64; mem:LD64[{{.*}}](tbaa= target triple = "hexagon" Index: test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll =================================================================== --- test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll +++ test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll @@ -36,7 +36,7 @@ ; CHECK-LABEL: SU({{.*}}): SW_RI{{.*}}, 4, ; CHECK: # preds left : 2 ; CHECK: # succs left : 0 -; CHECK-LABEL: SU({{.*}}): %vreg{{.*}} = LDW_RI{{.*}}, 12, +; CHECK-LABEL: SU({{.*}}): %{{.*}} = LDW_RI{{.*}}, 12, ; CHECK: # preds left : 1 ; CHECK: # succs left : 4 ; CHECK-LABEL: SU({{.*}}): STH_RI{{.*}}, 10, Index: test/CodeGen/PowerPC/quadint-return.ll =================================================================== --- test/CodeGen/PowerPC/quadint-return.ll +++ test/CodeGen/PowerPC/quadint-return.ll @@ -14,6 +14,6 @@ ; CHECK: ********** Function: foo ; CHECK: ********** FAST REGISTER ALLOCATION ********** -; CHECK: %x3 = COPY %vreg -; CHECK-NEXT: %x4 = COPY %vreg +; CHECK: %x3 = COPY %{{[0-9]+}} +; CHECK-NEXT: %x4 = COPY %{{[0-9]+}} ; CHECK-NEXT: BLR Index: test/CodeGen/WebAssembly/dbgvalue.ll =================================================================== --- test/CodeGen/WebAssembly/dbgvalue.ll +++ test/CodeGen/WebAssembly/dbgvalue.ll @@ -1,7 +1,7 @@ ; RUN: llc < %s -O0 -verify-machineinstrs -mtriple=wasm32-unknown-unknown-wasm | FileCheck %s ; CHECK: BB#0 -; CHECK: #DEBUG_VALUE: usage:self <- %vreg4 +; CHECK: #DEBUG_VALUE: usage:self <- %4 ; CHECK: BB#1 ; CHECK: DW_TAG_variable source_filename = "test/CodeGen/WebAssembly/dbgvalue.ll" Index: test/CodeGen/X86/GlobalISel/x86_64-fallback.ll =================================================================== --- test/CodeGen/X86/GlobalISel/x86_64-fallback.ll +++ test/CodeGen/X86/GlobalISel/x86_64-fallback.ll @@ -8,7 +8,7 @@ ; the fallback path. ; Check that we fallback on invoke translation failures. -; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %vreg1, %vreg0; mem:ST10[%ptr](align=16) (in function: test_x86_fp80_dump) +; FALLBACK-WITH-REPORT-ERR: remark: :0:0: unable to legalize instruction: G_STORE %1, %0; mem:ST10[%ptr](align=16) (in function: test_x86_fp80_dump) ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_x86_fp80_dump ; FALLBACK-WITH-REPORT-OUT-LABEL: test_x86_fp80_dump: define void @test_x86_fp80_dump(x86_fp80* %ptr){ Index: test/CodeGen/X86/cmovcmov.ll =================================================================== --- test/CodeGen/X86/cmovcmov.ll +++ test/CodeGen/X86/cmovcmov.ll @@ -227,8 +227,8 @@ ; The following test failed because llvm had a bug where a structure like: ; -; %vreg12 = CMOV_GR8 %vreg7, %vreg11 ... (lt) -; %vreg13 = CMOV_GR8 %vreg12, %vreg11 ... (gt) +; %12 = CMOV_GR8 %7, %11 ... (lt) +; %13 = CMOV_GR8 %12, %11 ... 
(gt) ; ; was lowered to: ; @@ -241,7 +241,7 @@ ; BB#9: ; vreg12 = phi(vreg7, BB#8, vreg11, BB#0, vreg12, BB#7) ; vreg13 = COPY vreg12 -; Which was invalid as %vreg12 is not the same value as %vreg13 +; Which was invalid as %12 is not the same value as %13 ; CHECK-LABEL: no_cascade_opt: ; CMOV-DAG: cmpl %edx, %esi Index: test/CodeGen/X86/coalescer-dce.ll =================================================================== --- test/CodeGen/X86/coalescer-dce.ll +++ test/CodeGen/X86/coalescer-dce.ll @@ -4,28 +4,28 @@ ; This test case has a sub-register join followed by a remat: ; -; 256L %vreg2 = COPY %vreg7:sub_32bit; GR32:%vreg2 GR64:%vreg7 -; Considering merging %vreg2 with %vreg7:sub_32bit +; 256L %2 = COPY %7:sub_32bit; GR32:%2 GR64:%7 +; Considering merging %2 with %7:sub_32bit ; Cross-class to GR64. -; RHS = %vreg2 = [256d,272d:0) 0@256d -; LHS = %vreg7 = [208d,256d:0)[304L,480L:0) 0@208d -; updated: 272L %vreg0 = COPY %vreg7:sub_32bit; GR32:%vreg0 GR64:%vreg7 -; Joined. Result = %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d +; RHS = %2 = [256d,272d:0) 0@256d +; LHS = %7 = [208d,256d:0)[304L,480L:0) 0@208d +; updated: 272L %0 = COPY %7:sub_32bit; GR32:%0 GR64:%7 +; Joined. Result = %7 = [208d,272d:0)[304L,480L:0) 0@208d ; -; 272L %vreg10:sub_32bit = COPY %vreg7:sub_32bit, %vreg10; GR64:%vreg10,%vreg7 -; Considering merging %vreg7 with %vreg10 -; RHS = %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d -; LHS = %vreg10 = [16d,64L:2)[64L,160L:1)[192L,240L:1)[272d,304L:3)[304L,352d:1)[352d,400d:0)[400d,400S:4) 0@352d 1@64L-phidef 2@16d-phikill 3@272d-phikill 4@400d -; Remat: %vreg10 = MOV64r0 %vreg10, %EFLAGS, %vreg10; GR64:%vreg10 -; Shrink: %vreg7 = [208d,272d:0)[304L,480L:0) 0@208d +; 272L %10:sub_32bit = COPY %7:sub_32bit, %10; GR64:%10,%7 +; Considering merging %7 with %10 +; RHS = %7 = [208d,272d:0)[304L,480L:0) 0@208d +; LHS = %10 = [16d,64L:2)[64L,160L:1)[192L,240L:1)[272d,304L:3)[304L,352d:1)[352d,400d:0)[400d,400S:4) 0@352d 1@64L-phidef 2@16d-phikill 3@272d-phikill 4@400d +; Remat: %10 = MOV64r0 %10, %EFLAGS, %10; GR64:%10 +; Shrink: %7 = [208d,272d:0)[304L,480L:0) 0@208d ; live-in at 240L ; live-in at 416L ; live-in at 320L ; live-in at 304L -; Shrunk: %vreg7 = [208d,256d:0)[304L,480L:0) 0@208d +; Shrunk: %7 = [208d,256d:0)[304L,480L:0) 0@208d ; ; The COPY at 256L is rewritten as a partial def, and that would artificially -; extend the live range of %vreg7 to end at 256d. When the joined copy is +; extend the live range of %7 to end at 256d. When the joined copy is ; removed, -verify-coalescing complains about the dangling kill. ; ; Index: test/CodeGen/X86/crash.ll =================================================================== --- test/CodeGen/X86/crash.ll +++ test/CodeGen/X86/crash.ll @@ -481,10 +481,10 @@ ; Check coalescing of IMPLICIT_DEF instructions: ; -; %vreg1 = IMPLICIT_DEF -; %vreg2 = MOV32r0 +; %1 = IMPLICIT_DEF +; %2 = MOV32r0 ; -; When coalescing %vreg1 and %vreg2, the IMPLICIT_DEF instruction should be +; When coalescing %1 and %2, the IMPLICIT_DEF instruction should be ; erased along with its value number. ; define void @rdar12474033() nounwind ssp { Index: test/CodeGen/X86/handle-move.ll =================================================================== --- test/CodeGen/X86/handle-move.ll +++ test/CodeGen/X86/handle-move.ll @@ -8,8 +8,8 @@ ; %edx has a live range into the function and is used by the DIV32r. 
; ; Here sinking a kill + dead def: -; 144B -> 180B: DIV32r %vreg4, %eax, %edx, %EFLAGS, %eax, %edx -; %vreg4: [48r,144r:0) 0@48r +; 144B -> 180B: DIV32r %4, %eax, %edx, %EFLAGS, %eax, %edx +; %4: [48r,144r:0) 0@48r ; --> [48r,180r:0) 0@48r ; DH: [0B,16r:0)[128r,144r:2)[144r,144d:1) 0@0B-phi 1@144r 2@128r ; --> [0B,16r:0)[128r,180r:2)[180r,180d:1) 0@0B-phi 1@180r 2@128r @@ -25,8 +25,8 @@ } ; Same as above, but moving a kill + live def: -; 144B -> 180B: DIV32r %vreg4, %eax, %edx, %EFLAGS, %eax, %edx -; %vreg4: [48r,144r:0) 0@48r +; 144B -> 180B: DIV32r %4, %eax, %edx, %EFLAGS, %eax, %edx +; %4: [48r,144r:0) 0@48r ; --> [48r,180r:0) 0@48r ; DH: [0B,16r:0)[128r,144r:2)[144r,184r:1) 0@0B-phi 1@144r 2@128r ; --> [0B,16r:0)[128r,180r:2)[180r,184r:1) 0@0B-phi 1@180r 2@128r @@ -41,13 +41,13 @@ ret i32 %add } -; Moving a use below the existing kill (%vreg5): -; Moving a tied virtual register def (%vreg11): +; Moving a use below the existing kill (%5): +; Moving a tied virtual register def (%11): ; -; 96B -> 120B: %vreg11 = SUB32rr %vreg11, %vreg5 -; %vreg11: [80r,96r:1)[96r,144r:0) 0@96r 1@80r +; 96B -> 120B: %11 = SUB32rr %11, %5 +; %11: [80r,96r:1)[96r,144r:0) 0@96r 1@80r ; --> [80r,120r:1)[120r,144r:0) 0@120r 1@80r -; %vreg5: [16r,112r:0) 0@16r +; %5: [16r,112r:0) 0@16r ; --> [16r,120r:0) 0@16r ; define i32 @f3(i32 %a, i32 %b) nounwind uwtable readnone ssp { Index: test/CodeGen/X86/invalid-liveness.mir =================================================================== --- test/CodeGen/X86/invalid-liveness.mir +++ test/CodeGen/X86/invalid-liveness.mir @@ -9,7 +9,7 @@ # on all paths; In this example a def for vreg0 is missing when jumping from # bb.0 to bb.3. # -# CHECK: Use of %vreg0 does not have a corresponding definition on every path +# CHECK: Use of %0 does not have a corresponding definition on every path # CHECK: ERROR: Use not jointly dominated by defs. name: func registers: Index: test/CodeGen/X86/liveness-local-regalloc.ll =================================================================== --- test/CodeGen/X86/liveness-local-regalloc.ll +++ test/CodeGen/X86/liveness-local-regalloc.ll @@ -62,7 +62,7 @@ ; RAFast would forget to add a super-register when rewriting: -; %vreg10:sub_32bit = COPY %R9D +; %10:sub_32bit = COPY %R9D ; This trips up the machine code verifier. define void @autogen_SD24657(i8*, i32*, i64*, i32, i64, i8) { BB: Index: test/CodeGen/X86/misched-copy.ll =================================================================== --- test/CodeGen/X86/misched-copy.ll +++ test/CodeGen/X86/misched-copy.ll @@ -10,7 +10,7 @@ ; ; CHECK: *** Final schedule for BB#1 *** ; CHECK: %eax = COPY -; CHECK-NEXT: MUL32r %vreg{{[0-9]+}}, %eax, %edx, %eflags, %eax; +; CHECK-NEXT: MUL32r %{{[0-9]+}}, %eax, %edx, %eflags, %eax; ; CHECK-NEXT: COPY %e{{[ad]}}x ; CHECK-NEXT: COPY %e{{[ad]}}x ; CHECK: DIVSSrm Index: test/CodeGen/X86/norex-subreg.ll =================================================================== --- test/CodeGen/X86/norex-subreg.ll +++ test/CodeGen/X86/norex-subreg.ll @@ -41,10 +41,10 @@ ; This test case extracts a sub_8bit_hi sub-register: ; -; %vreg2 = COPY %vreg1:sub_8bit_hi; GR8:%vreg2 GR64_ABCD:%vreg1 -; TEST8ri %vreg2, 1, %EFLAGS; GR8:%vreg2 +; %2 = COPY %1:sub_8bit_hi; GR8:%2 GR64_ABCD:%1 +; TEST8ri %2, 1, %EFLAGS; GR8:%2 ; -; %vreg2 must be constrained to GR8_NOREX, or the COPY could become impossible. +; %2 must be constrained to GR8_NOREX, or the COPY could become impossible. 
; ; PR11088 Index: test/CodeGen/X86/phys_subreg_coalesce-3.ll =================================================================== --- test/CodeGen/X86/phys_subreg_coalesce-3.ll +++ test/CodeGen/X86/phys_subreg_coalesce-3.ll @@ -1,10 +1,10 @@ ; RUN: llc < %s -verify-machineinstrs -mtriple=i386-apple-darwin -mcpu=corei7 | FileCheck %s ; rdar://5571034 -; This requires physreg joining, %vreg13 is live everywhere: -; 304L %cl = COPY %vreg13:sub_8bit; GR32_ABCD:%vreg13 -; 320L %vreg15 = COPY %vreg19; GR32:%vreg15 GR32_NOSP:%vreg19 -; 336L %vreg15 = SAR32rCL %vreg15, %EFLAGS, %cl; GR32:%vreg15 +; This requires physreg joining, %13 is live everywhere: +; 304L %cl = COPY %13:sub_8bit; GR32_ABCD:%13 +; 320L %15 = COPY %19; GR32:%15 GR32_NOSP:%19 +; 336L %15 = SAR32rCL %15, %EFLAGS, %cl; GR32:%15 define void @foo(i32* nocapture %quadrant, i32* nocapture %ptr, i32 %bbSize, i32 %bbStart, i32 %shifts) nounwind ssp { ; CHECK-LABEL: foo: Index: test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir =================================================================== --- test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir +++ test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir @@ -148,16 +148,16 @@ # Let's verify that the slot index ranges for the unused variables argc/argv, # connected to physical regs %EDI and %RSI, does not overlap with the ranges -# for %vreg2 and %vreg3. The register allocator is actually allocating the -# virtual registers # to %EDI and %ESI, so the ranges for argc/argv should -# not cover the whole BB. +# for %2 and %3. The register allocator is actually allocating the# virtual +# registers # to %EDI and %ESI, so the ranges for argc/argv should not cover +# the whole BB. # # CHECKDBG-LABEL: ********** EMITTING LIVE DEBUG VARIABLES ********** # CHECKDBG-NEXT: !"argc,5" [0B;0e):0 Loc0=%edi # CHECKDBG-NEXT: [0B;0e):0 BB#0-160B # CHECKDBG-NEXT: !"argv,5" [0B;0e):0 Loc0=%rsi # CHECKDBG-NEXT: [0B;0e):0 BB#0-160B -# CHECKDBG-NEXT: !"a0,7" [16r;64r):0 Loc0=%vreg2 +# CHECKDBG-NEXT: !"a0,7" [16r;64r):0 Loc0=%2 # CHECKDBG-NEXT: [16r;64r):0 BB#0-160B -# CHECKDBG-NEXT: !"a1,8" [32r;80r):0 Loc0=%vreg3 +# CHECKDBG-NEXT: !"a1,8" [32r;80r):0 Loc0=%3 # CHECKDBG-NEXT: [32r;80r):0 BB#0-160B
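For out-of-tree passes or scripts that still assemble this kind of output by hand, the new spelling is easy to mirror: drop the vreg prefix and print only the virtual-register index, with an optional sub-register name after a colon (the test updates above apply the same rule to FileCheck patterns, e.g. %vreg{{[0-9]+}} = LDRWui becoming %{{[0-9]+}} = LDRWui). The sketch below is illustrative only and does not use LLVM's own printing helpers; formatVirtReg is a hypothetical name invented for the example.

#include <cstdio>
#include <string>

// Minimal sketch (assumed helper, not LLVM's implementation): format a
// virtual-register index the way the new debug/MIR output spells it,
// e.g. "%5" or "%5:sub_32bit". The old spelling would have been "%vreg5".
static std::string formatVirtReg(unsigned Index,
                                 const std::string &SubRegName = std::string()) {
  std::string Str = "%" + std::to_string(Index);
  if (!SubRegName.empty())
    Str += ":" + SubRegName;
  return Str;
}

int main() {
  // Prints "%2 = LDRWui %0, 1" and "%3:sub_32 = LDRWui %0, 2".
  std::printf("%s = LDRWui %s, 1\n", formatVirtReg(2).c_str(),
              formatVirtReg(0).c_str());
  std::printf("%s = LDRWui %s, 2\n", formatVirtReg(3, "sub_32").c_str(),
              formatVirtReg(0).c_str());
  return 0;
}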