Index: llvm/trunk/include/llvm/CodeGen/RegisterScavenging.h =================================================================== --- llvm/trunk/include/llvm/CodeGen/RegisterScavenging.h +++ llvm/trunk/include/llvm/CodeGen/RegisterScavenging.h @@ -157,12 +157,24 @@ /// available and do the appropriate bookkeeping. SPAdj is the stack /// adjustment due to call frame, it's passed along to eliminateFrameIndex(). /// Returns the scavenged register. + /// This function performs worse if kill flags are incomplete, consider using + /// scavengeRegisterBackwards() instead! unsigned scavengeRegister(const TargetRegisterClass *RegClass, MachineBasicBlock::iterator I, int SPAdj); unsigned scavengeRegister(const TargetRegisterClass *RegClass, int SPAdj) { return scavengeRegister(RegClass, MBBI, SPAdj); } + /// Make a register of the specific register class available from the current + /// position backwards to the place before \p To. If \p RestoreAfter is true + /// this includes the instruction at the current position. + /// SPAdj is the stack adjustment due to call frame, it's passed along to + /// eliminateFrameIndex(). + /// Returns the scavenged register. + unsigned scavengeRegisterBackwards(const TargetRegisterClass &RC, + MachineBasicBlock::iterator To, + bool RestoreAfter, int SPAdj); + /// Tell the scavenger a register is used. void setRegUsed(unsigned Reg, LaneBitmask LaneMask = ~0u); private: @@ -202,6 +214,12 @@ /// Mark live-in registers of basic block as used. void setLiveInsUsed(const MachineBasicBlock &MBB); + + /// Spill a register after position \p After and reload it before position + /// \p UseMI. 
+ ScavengedInfo &spill(unsigned Reg, const TargetRegisterClass &RC, int SPAdj, + MachineBasicBlock::iterator After, + MachineBasicBlock::iterator &UseMI); }; } // End llvm namespace Index: llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp =================================================================== --- llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp +++ llvm/trunk/lib/CodeGen/PrologEpilogInserter.cpp @@ -41,6 +41,7 @@ #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetSubtargetInfo.h" +#include <algorithm> #include using namespace llvm; @@ -1146,6 +1147,55 @@ } } +/// Allocate a register for the virtual register \p VReg. The last use of +/// \p VReg is around the current position of the register scavenger \p RS. +/// \p ReserveAfter controls whether the scavenged register needs to be reserved +/// after the current instruction, otherwise it will only be reserved before the +/// current instruction. +static unsigned scavengeVReg(MachineRegisterInfo &MRI, RegScavenger &RS, + unsigned VReg, bool ReserveAfter) { +#ifndef NDEBUG + // Verify that all definitions and uses are in the same basic block. + const MachineBasicBlock *CommonMBB = nullptr; + bool HadDef = false; + for (MachineOperand &MO : MRI.reg_nodbg_operands(VReg)) { + MachineBasicBlock *MBB = MO.getParent()->getParent(); + if (CommonMBB == nullptr) + CommonMBB = MBB; + assert(MBB == CommonMBB && "All defs+uses must be in the same basic block"); + if (MO.isDef()) + HadDef = true; + } + assert(HadDef && "Must have at least 1 Def"); +#endif + + // We should only have one definition of the register. However to accommodate + // the requirements of two address code we also allow definitions in + // subsequent instructions provided they also read the register. That way + // we get a single contiguous lifetime. + // + // Definitions in MRI.def_begin() are unordered, search for the first. 
+ const TargetRegisterInfo *TRI = MRI.getTargetRegisterInfo(); + MachineRegisterInfo::def_iterator FirstDef = + std::find_if(MRI.def_begin(VReg), MRI.def_end(), + [VReg, TRI](const MachineOperand &MO) { + return !MO.getParent()->readsRegister(VReg, TRI); + }); + assert(FirstDef != MRI.def_end() && + "Must have one definition that does not redefine vreg"); + MachineInstr &DefMI = *FirstDef->getParent(); + + // The register scavenger will report a free register inserting an emergency + // spill/reload if necessary. + int SPAdj = 0; + const TargetRegisterClass &RC = *MRI.getRegClass(VReg); + unsigned SReg = RS.scavengeRegisterBackwards(RC, DefMI.getIterator(), + ReserveAfter, SPAdj); + MRI.replaceRegWith(VReg, SReg); + ++NumScavengedRegs; + return SReg; +} + /// doScavengeFrameVirtualRegs - Replace all frame index virtual registers /// with physical registers. Use the register scavenger to find an /// appropriate register to use. @@ -1158,77 +1208,46 @@ // Run through the instructions and find any virtual registers. MachineRegisterInfo &MRI = MF.getRegInfo(); for (MachineBasicBlock &MBB : MF) { - RS->enterBasicBlock(MBB); - - int SPAdj = 0; + RS->enterBasicBlockEnd(MBB); - // The instruction stream may change in the loop, so check MBB.end() - // directly. - for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) { - // We might end up here again with a NULL iterator if we scavenged a - // register for which we inserted spill code for definition by what was - // originally the first instruction in MBB. - if (I == MachineBasicBlock::iterator(nullptr)) - I = MBB.begin(); + bool LastIterationHadVRegUses = false; + for (MachineBasicBlock::iterator I = MBB.end(); I != MBB.begin(); ) { + --I; + // Move RegScavenger to the position between *I and *std::next(I). + RS->backward(I); + + // Look for unassigned vregs in the uses of *std::next(I). 
+ MachineBasicBlock::iterator N = std::next(I); + if (LastIterationHadVRegUses) { + const MachineInstr &NMI = *N; + for (const MachineOperand &MO : NMI.operands()) { + if (!MO.isReg() || !MO.readsReg()) + continue; + unsigned Reg = MO.getReg(); + if (TargetRegisterInfo::isVirtualRegister(Reg)) { + unsigned SReg = scavengeVReg(MRI, *RS, Reg, true); + RS->setRegUsed(SReg); + } + } + } + // Look for unassigned vregs in the defs of *I. + LastIterationHadVRegUses = false; const MachineInstr &MI = *I; - MachineBasicBlock::iterator J = std::next(I); - MachineBasicBlock::iterator P = - I == MBB.begin() ? MachineBasicBlock::iterator(nullptr) - : std::prev(I); - - // RS should process this instruction before we might scavenge at this - // location. This is because we might be replacing a virtual register - // defined by this instruction, and if so, registers killed by this - // instruction are available, and defined registers are not. - RS->forward(I); - for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg()) continue; unsigned Reg = MO.getReg(); if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue; - - // When we first encounter a new virtual register, it - // must be a definition. - assert(MO.isDef() && "frame index virtual missing def!"); - // Scavenge a new scratch register - const TargetRegisterClass *RC = MRI.getRegClass(Reg); - unsigned ScratchReg = RS->scavengeRegister(RC, J, SPAdj); - - ++NumScavengedRegs; - - // Replace this reference to the virtual register with the - // scratch register. - assert(ScratchReg && "Missing scratch register!"); - MRI.replaceRegWith(Reg, ScratchReg); - - // Because this instruction was processed by the RS before this - // register was allocated, make sure that the RS now records the - // register as being used. - RS->setRegUsed(ScratchReg); + // We have to look at all operands anyway so we can precalculate here + // whether there is a reading operand. 
This allows use to skip the use + // step in the next iteration if there was none. + if (MO.readsReg()) + LastIterationHadVRegUses = true; + if (MO.isDef()) + scavengeVReg(MRI, *RS, Reg, false); } - - // If the scavenger needed to use one of its spill slots, the - // spill code will have been inserted in between I and J. This is a - // problem because we need the spill code before I: Move I to just - // prior to J. - if (I != std::prev(J)) { - MBB.splice(J, &MBB, I); - - // Before we move I, we need to prepare the RS to visit I again. - // Specifically, RS will assert if it sees uses of registers that - // it believes are undefined. Because we have already processed - // register kills in I, when it visits I again, it will believe that - // those registers are undefined. To avoid this situation, unprocess - // the instruction I. - assert(RS->getCurrentPosition() == I && - "The register scavenger has an unexpected position"); - I = P; - RS->unprocess(P); - } else - ++I; } } } Index: llvm/trunk/lib/CodeGen/RegisterScavenging.cpp =================================================================== --- llvm/trunk/lib/CodeGen/RegisterScavenging.cpp +++ llvm/trunk/lib/CodeGen/RegisterScavenging.cpp @@ -299,6 +299,14 @@ } } + // Expire scavenge spill frameindex uses. 
+ for (ScavengedInfo &I : Scavenged) { + if (I.Restore == &MI) { + I.Reg = 0; + I.Restore = nullptr; + } + } + if (MBBI == MBB->begin()) { MBBI = MachineBasicBlock::iterator(nullptr); Tracking = false; @@ -398,6 +406,72 @@ return Survivor; } +static std::pair<unsigned, MachineBasicBlock::iterator> +findSurvivorBackwards(const TargetRegisterInfo &TRI, + MachineBasicBlock::iterator From, MachineBasicBlock::iterator To, + BitVector &Available, BitVector &Candidates) { + bool FoundTo = false; + unsigned Survivor = 0; + MachineBasicBlock::iterator Pos; + MachineBasicBlock &MBB = *From->getParent(); + MachineBasicBlock::iterator I = From; + unsigned InstrLimit = 25; + unsigned InstrCountDown = InstrLimit; + for (;;) { + const MachineInstr &MI = *I; + if (MI.isDebugValue()) + continue; + + // Remove any candidates touched by instruction. + bool FoundVReg = false; + for (const MachineOperand &MO : MI.operands()) { + if (MO.isRegMask()) { + Candidates.clearBitsNotInMask(MO.getRegMask()); + continue; + } + if (!MO.isReg() || MO.isUndef()) + continue; + unsigned Reg = MO.getReg(); + if (TargetRegisterInfo::isVirtualRegister(Reg)) { + FoundVReg = true; + } else if (TargetRegisterInfo::isPhysicalRegister(Reg)) { + for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI) + Candidates.reset(*AI); + } + } + + if (I == To) { + // If one of the available registers survived this long take it. + Available &= Candidates; + int Reg = Available.find_first(); + if (Reg != -1) + return std::make_pair(Reg, MBB.end()); + // Otherwise we will continue up to InstrLimit instructions to find + // the register which is not defined/used for the longest time. + FoundTo = true; + Pos = To; + } + if (FoundTo) { + if (Survivor == 0 || !Candidates.test(Survivor)) { + int Reg = Candidates.find_first(); + if (Reg == -1) + break; + Survivor = Reg; + } + if (--InstrCountDown == 0 || I == MBB.begin()) + break; + if (FoundVReg) { + // We found a vreg, reset the InstrLimit counter. 
+ InstrCountDown = InstrLimit; + Pos = I; + } + } + --I; + } + + return std::make_pair(Survivor, Pos); +} + static unsigned getFrameIndexOperandNum(MachineInstr &MI) { unsigned i = 0; while (!MI.getOperand(i).isFI()) { @@ -407,43 +481,16 @@ return i; } -unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC, - MachineBasicBlock::iterator I, - int SPAdj) { - MachineInstr &MI = *I; - const MachineFunction &MF = *MI.getParent()->getParent(); - // Consider all allocatable registers in the register class initially - BitVector Candidates = TRI->getAllocatableSet(MF, RC); - - // Exclude all the registers being used by the instruction. - for (const MachineOperand &MO : MI.operands()) { - if (MO.isReg() && MO.getReg() != 0 && !(MO.isUse() && MO.isUndef()) && - !TargetRegisterInfo::isVirtualRegister(MO.getReg())) - Candidates.reset(MO.getReg()); - } - - // Try to find a register that's unused if there is one, as then we won't - // have to spill. - BitVector Available = getRegsAvailable(RC); - Available &= Candidates; - if (Available.any()) - Candidates = Available; - - // Find the register whose use is furthest away. - MachineBasicBlock::iterator UseMI; - unsigned SReg = findSurvivorReg(I, Candidates, 25, UseMI); - - // If we found an unused register there is no reason to spill it. - if (!isRegUsed(SReg)) { - DEBUG(dbgs() << "Scavenged register: " << TRI->getName(SReg) << "\n"); - return SReg; - } - +RegScavenger::ScavengedInfo & +RegScavenger::spill(unsigned Reg, const TargetRegisterClass &RC, int SPAdj, + MachineBasicBlock::iterator Before, + MachineBasicBlock::iterator &UseMI) { // Find an available scavenging slot with size and alignment matching // the requirements of the class RC. 
+ const MachineFunction &MF = *Before->getParent()->getParent(); const MachineFrameInfo &MFI = *MF.getFrameInfo(); - unsigned NeedSize = RC->getSize(); - unsigned NeedAlign = RC->getAlignment(); + unsigned NeedSize = RC.getSize(); + unsigned NeedAlign = RC.getAlignment(); unsigned SI = Scavenged.size(), Diff = UINT_MAX; int FIB = MFI.getObjectIndexBegin(), FIE = MFI.getObjectIndexEnd(); @@ -478,42 +525,108 @@ } // Avoid infinite regress - Scavenged[SI].Reg = SReg; + Scavenged[SI].Reg = Reg; // If the target knows how to save/restore the register, let it do so; // otherwise, use the emergency stack spill slot. - if (!TRI->saveScavengerRegister(*MBB, I, UseMI, RC, SReg)) { - // Spill the scavenged register before I. + if (!TRI->saveScavengerRegister(*MBB, Before, UseMI, &RC, Reg)) { + // Spill the scavenged register before \p Before. int FI = Scavenged[SI].FrameIndex; if (FI < FIB || FI >= FIE) { std::string Msg = std::string("Error while trying to spill ") + - TRI->getName(SReg) + " from class " + TRI->getRegClassName(RC) + + TRI->getName(Reg) + " from class " + TRI->getRegClassName(&RC) + ": Cannot scavenge register without an emergency spill slot!"; report_fatal_error(Msg.c_str()); } - TII->storeRegToStackSlot(*MBB, I, SReg, true, Scavenged[SI].FrameIndex, - RC, TRI); - MachineBasicBlock::iterator II = std::prev(I); + TII->storeRegToStackSlot(*MBB, Before, Reg, true, Scavenged[SI].FrameIndex, + &RC, TRI); + MachineBasicBlock::iterator II = std::prev(Before); unsigned FIOperandNum = getFrameIndexOperandNum(*II); TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this); // Restore the scavenged register before its use (or first terminator). 
- TII->loadRegFromStackSlot(*MBB, UseMI, SReg, Scavenged[SI].FrameIndex, - RC, TRI); + TII->loadRegFromStackSlot(*MBB, UseMI, Reg, Scavenged[SI].FrameIndex, + &RC, TRI); II = std::prev(UseMI); FIOperandNum = getFrameIndexOperandNum(*II); TRI->eliminateFrameIndex(II, SPAdj, FIOperandNum, this); } + return Scavenged[SI]; +} + +unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC, + MachineBasicBlock::iterator I, + int SPAdj) { + MachineInstr &MI = *I; + const MachineFunction &MF = *MI.getParent()->getParent(); + // Consider all allocatable registers in the register class initially + BitVector Candidates = TRI->getAllocatableSet(MF, RC); + + // Exclude all the registers being used by the instruction. + for (const MachineOperand &MO : MI.operands()) { + if (MO.isReg() && MO.getReg() != 0 && !(MO.isUse() && MO.isUndef()) && + !TargetRegisterInfo::isVirtualRegister(MO.getReg())) + Candidates.reset(MO.getReg()); + } + + // Try to find a register that's unused if there is one, as then we won't + // have to spill. + BitVector Available = getRegsAvailable(RC); + Available &= Candidates; + if (Available.any()) + Candidates = Available; + + // Find the register whose use is furthest away. + MachineBasicBlock::iterator UseMI; + unsigned SReg = findSurvivorReg(I, Candidates, 25, UseMI); - Scavenged[SI].Restore = &*std::prev(UseMI); + // If we found an unused register there is no reason to spill it. + if (!isRegUsed(SReg)) { + DEBUG(dbgs() << "Scavenged register: " << TRI->getName(SReg) << "\n"); + return SReg; + } - // Doing this here leads to infinite regress. 
- // Scavenged[SI].Reg = SReg; + ScavengedInfo &Scavenged = spill(SReg, *RC, SPAdj, I, UseMI); + Scavenged.Restore = std::prev(UseMI); DEBUG(dbgs() << "Scavenged register (with spill): " << TRI->getName(SReg) << "\n"); return SReg; } + +unsigned RegScavenger::scavengeRegisterBackwards(const TargetRegisterClass &RC, + MachineBasicBlock::iterator To, + bool RestoreAfter, int SPAdj) { + const MachineBasicBlock &MBB = *To->getParent(); + const MachineFunction &MF = *MBB.getParent(); + // Consider all allocatable registers in the register class initially + BitVector Candidates = TRI->getAllocatableSet(MF, &RC); + + // Try to find a register that's unused if there is one, as then we won't + // have to spill. + BitVector Available = getRegsAvailable(&RC); + + // Find the register whose use is furthest away. + MachineBasicBlock::iterator UseMI; + std::pair P = + findSurvivorBackwards(*TRI, MBBI, To, Available, Candidates); + unsigned Reg = P.first; + assert(Reg != 0 && "No register left to scavenge!"); + // Found an available register? + if (!Available.test(Reg)) { + MachineBasicBlock::iterator ReloadBefore = + RestoreAfter ? 
std::next(MBBI) : MBBI; + DEBUG(dbgs() << "Reload before: " << *ReloadBefore << '\n'); + ScavengedInfo &Scavenged = spill(Reg, RC, SPAdj, P.second, ReloadBefore); + Scavenged.Restore = std::prev(P.second); + addRegUnits(RegUnitsAvailable, Reg); + DEBUG(dbgs() << "Scavenged register with spill: " << PrintReg(Reg, TRI) + << " until " << *P.second); + } else { + DEBUG(dbgs() << "Scavenged free register: " << PrintReg(Reg, TRI) << '\n'); + } + return Reg; +} Index: llvm/trunk/test/CodeGen/AMDGPU/captured-frame-index.ll =================================================================== --- llvm/trunk/test/CodeGen/AMDGPU/captured-frame-index.ll +++ llvm/trunk/test/CodeGen/AMDGPU/captured-frame-index.ll @@ -140,8 +140,8 @@ } ; GCN-LABEL: {{^}}stored_fi_to_global_huge_frame_offset: -; GCN: s_add_i32 [[BASE_1_OFF_0:s[0-9]+]], 0, 0x3ffc -; GCN: v_mov_b32_e32 [[BASE_0:v[0-9]+]], 0{{$}} +; GCN-DAG: s_add_i32 [[BASE_1_OFF_0:s[0-9]+]], 0, 0x3ffc +; GCN-DAG: v_mov_b32_e32 [[BASE_0:v[0-9]+]], 0{{$}} ; GCN: buffer_store_dword [[BASE_0]], v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen ; GCN: v_mov_b32_e32 [[V_BASE_1_OFF_0:v[0-9]+]], [[BASE_1_OFF_0]] Index: llvm/trunk/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll =================================================================== --- llvm/trunk/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll +++ llvm/trunk/test/CodeGen/Mips/emergency-spill-slot-near-fp.ll @@ -1,34 +1,62 @@ -; Check that register scavenging spill slot is close to $fp. ; RUN: llc -march=mipsel -O0 -relocation-model=pic < %s | FileCheck %s +; Check that register scavenging spill slot is close to $fp. 
+target triple="mipsel--" -; CHECK: sw ${{.*}}, 8($sp) -; CHECK: lw ${{.*}}, 8($sp) +@var = external global i32 +@ptrvar = external global i8* -define i32 @main(i32 signext %argc, i8** %argv) #0 { -entry: - %retval = alloca i32, align 4 - %argc.addr = alloca i32, align 4 - %argv.addr = alloca i8**, align 4 - %v0 = alloca <16 x i8>, align 16 - %.compoundliteral = alloca <16 x i8>, align 16 - %v1 = alloca <16 x i8>, align 16 - %.compoundliteral1 = alloca <16 x i8>, align 16 - %unused_variable = alloca [16384 x i32], align 4 - %result = alloca <16 x i8>, align 16 - store i32 0, i32* %retval - store i32 %argc, i32* %argc.addr, align 4 - store i8** %argv, i8*** %argv.addr, align 4 - store <16 x i8> , <16 x i8>* %.compoundliteral - %0 = load <16 x i8>, <16 x i8>* %.compoundliteral - store <16 x i8> %0, <16 x i8>* %v0, align 16 - store <16 x i8> zeroinitializer, <16 x i8>* %.compoundliteral1 - %1 = load <16 x i8>, <16 x i8>* %.compoundliteral1 - store <16 x i8> %1, <16 x i8>* %v1, align 16 - %2 = load <16 x i8>, <16 x i8>* %v0, align 16 - %3 = load <16 x i8>, <16 x i8>* %v1, align 16 - %mul = mul <16 x i8> %2, %3 - store <16 x i8> %mul, <16 x i8>* %result, align 16 - ret i32 0 -} +; CHECK-LABEL: func: +define void @func() { + %space = alloca i32, align 4 + %stackspace = alloca[16384 x i32], align 4 + + ; ensure stackspace is not optimized out + %stackspace_casted = bitcast [16384 x i32]* %stackspace to i8* + store volatile i8* %stackspace_casted, i8** @ptrvar -attributes #0 = { noinline "no-frame-pointer-elim"="true" } + ; Load values to increase register pressure. 
+ %v0 = load volatile i32, i32* @var + %v1 = load volatile i32, i32* @var + %v2 = load volatile i32, i32* @var + %v3 = load volatile i32, i32* @var + %v4 = load volatile i32, i32* @var + %v5 = load volatile i32, i32* @var + %v6 = load volatile i32, i32* @var + %v7 = load volatile i32, i32* @var + %v8 = load volatile i32, i32* @var + %v9 = load volatile i32, i32* @var + %v10 = load volatile i32, i32* @var + %v11 = load volatile i32, i32* @var + %v12 = load volatile i32, i32* @var + %v13 = load volatile i32, i32* @var + %v14 = load volatile i32, i32* @var + %v15 = load volatile i32, i32* @var + %v16 = load volatile i32, i32* @var + + ; Computing a stack-relative values needs an additional register. + ; We should get an emergency spill/reload for this. + ; CHECK: sw ${{.*}}, 0($sp) + ; CHECK: lw ${{.*}}, 0($sp) + store volatile i32 %v0, i32* %space + + ; store values so they are used. + store volatile i32 %v0, i32* @var + store volatile i32 %v1, i32* @var + store volatile i32 %v2, i32* @var + store volatile i32 %v3, i32* @var + store volatile i32 %v4, i32* @var + store volatile i32 %v5, i32* @var + store volatile i32 %v6, i32* @var + store volatile i32 %v7, i32* @var + store volatile i32 %v8, i32* @var + store volatile i32 %v9, i32* @var + store volatile i32 %v10, i32* @var + store volatile i32 %v11, i32* @var + store volatile i32 %v12, i32* @var + store volatile i32 %v13, i32* @var + store volatile i32 %v14, i32* @var + store volatile i32 %v15, i32* @var + store volatile i32 %v16, i32* @var + + ret void +} Index: llvm/trunk/test/CodeGen/PowerPC/dyn-alloca-aligned.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/dyn-alloca-aligned.ll +++ llvm/trunk/test/CodeGen/PowerPC/dyn-alloca-aligned.ll @@ -25,8 +25,8 @@ ; CHECK-DAG: li [[REG1:[0-9]+]], -128 ; CHECK-DAG: neg [[REG2:[0-9]+]], -; CHECK: and [[REG1]], [[REG2]], [[REG1]] -; CHECK: stdux {{[0-9]+}}, 1, [[REG1]] +; CHECK: and [[REG3:[0-9]+]], [[REG2]], [[REG1]] 
+; CHECK: stdux {{[0-9]+}}, 1, [[REG3]] ; CHECK: blr Index: llvm/trunk/test/CodeGen/SystemZ/frame-13.ll =================================================================== --- llvm/trunk/test/CodeGen/SystemZ/frame-13.ll +++ llvm/trunk/test/CodeGen/SystemZ/frame-13.ll @@ -212,15 +212,15 @@ ; CHECK-NOFP-LABEL: f10: ; CHECK-NOFP: stg [[REGISTER:%r[1-9][0-4]?]], [[OFFSET:160|168]](%r15) ; CHECK-NOFP: lay [[REGISTER]], 4096(%r15) -; CHECK-NOFP: mvhi 0([[REGISTER]]), 42 ; CHECK-NOFP: lg [[REGISTER]], [[OFFSET]](%r15) +; CHECK-NOFP: mvhi 0([[REGISTER]]), 42 ; CHECK-NOFP: br %r14 ; ; CHECK-FP-LABEL: f10: ; CHECK-FP: stg [[REGISTER:%r[1-9][0-4]?]], [[OFFSET:160|168]](%r11) ; CHECK-FP: lay [[REGISTER]], 4096(%r11) -; CHECK-FP: mvhi 0([[REGISTER]]), 42 ; CHECK-FP: lg [[REGISTER]], [[OFFSET]](%r11) +; CHECK-FP: mvhi 0([[REGISTER]]), 42 ; CHECK-FP: br %r14 %i0 = load volatile i32 , i32 *%vptr %i1 = load volatile i32 , i32 *%vptr @@ -250,8 +250,8 @@ ; CHECK-NOFP: stmg %r6, %r15, ; CHECK-NOFP: stg [[REGISTER:%r[1-9][0-4]?]], [[OFFSET:160|168]](%r15) ; CHECK-NOFP: lay [[REGISTER]], 4096(%r15) -; CHECK-NOFP: mvhi 0([[REGISTER]]), 42 ; CHECK-NOFP: lg [[REGISTER]], [[OFFSET]](%r15) +; CHECK-NOFP: mvhi 0([[REGISTER]]), 42 ; CHECK-NOFP: lmg %r6, %r15, ; CHECK-NOFP: br %r14 %i0 = load volatile i32 , i32 *%vptr Index: llvm/trunk/test/CodeGen/SystemZ/frame-14.ll =================================================================== --- llvm/trunk/test/CodeGen/SystemZ/frame-14.ll +++ llvm/trunk/test/CodeGen/SystemZ/frame-14.ll @@ -234,16 +234,16 @@ ; CHECK-NOFP: stg [[REGISTER:%r[1-9][0-4]?]], [[OFFSET:160|168]](%r15) ; CHECK-NOFP: llilh [[REGISTER]], 8 ; CHECK-NOFP: agr [[REGISTER]], %r15 -; CHECK-NOFP: mvi 0([[REGISTER]]), 42 ; CHECK-NOFP: lg [[REGISTER]], [[OFFSET]](%r15) +; CHECK-NOFP: mvi 0([[REGISTER]]), 42 ; CHECK-NOFP: br %r14 ; ; CHECK-FP-LABEL: f10: ; CHECK-FP: stg [[REGISTER:%r[1-9][0-4]?]], [[OFFSET:160|168]](%r11) ; CHECK-FP: llilh [[REGISTER]], 8 ; CHECK-FP: agr [[REGISTER]], 
%r11 -; CHECK-FP: mvi 0([[REGISTER]]), 42 ; CHECK-FP: lg [[REGISTER]], [[OFFSET]](%r11) +; CHECK-FP: mvi 0([[REGISTER]]), 42 ; CHECK-FP: br %r14 %i0 = load volatile i32 , i32 *%vptr %i1 = load volatile i32 , i32 *%vptr @@ -274,8 +274,8 @@ ; CHECK-NOFP: stg [[REGISTER:%r[1-9][0-4]?]], [[OFFSET:160|168]](%r15) ; CHECK-NOFP: llilh [[REGISTER]], 8 ; CHECK-NOFP: agr [[REGISTER]], %r15 -; CHECK-NOFP: mvi 0([[REGISTER]]), 42 ; CHECK-NOFP: lg [[REGISTER]], [[OFFSET]](%r15) +; CHECK-NOFP: mvi 0([[REGISTER]]), 42 ; CHECK-NOFP: lmg %r6, %r15, ; CHECK-NOFP: br %r14 %i0 = load volatile i32 , i32 *%vptr Index: llvm/trunk/test/CodeGen/SystemZ/frame-15.ll =================================================================== --- llvm/trunk/test/CodeGen/SystemZ/frame-15.ll +++ llvm/trunk/test/CodeGen/SystemZ/frame-15.ll @@ -279,15 +279,15 @@ ; CHECK-NOFP-LABEL: f10: ; CHECK-NOFP: stg [[REGISTER:%r[1-9][0-4]?]], [[OFFSET:160|168]](%r15) ; CHECK-NOFP: lghi [[REGISTER]], 4096 -; CHECK-NOFP: ldeb {{%f[0-7]}}, 0([[REGISTER]],%r15) ; CHECK-NOFP: lg [[REGISTER]], [[OFFSET]](%r15) +; CHECK-NOFP: ldeb {{%f[0-7]}}, 0([[REGISTER]],%r15) ; CHECK-NOFP: br %r14 ; ; CHECK-FP-LABEL: f10: ; CHECK-FP: stg [[REGISTER:%r[1-9][0-4]?]], [[OFFSET:160|168]](%r11) ; CHECK-FP: lghi [[REGISTER]], 4096 -; CHECK-FP: ldeb {{%f[0-7]}}, 0([[REGISTER]],%r11) ; CHECK-FP: lg [[REGISTER]], [[OFFSET]](%r11) +; CHECK-FP: ldeb {{%f[0-7]}}, 0([[REGISTER]],%r11) ; CHECK-FP: br %r14 %region1 = alloca [978 x float], align 8 %region2 = alloca [978 x float], align 8 Index: llvm/trunk/test/CodeGen/SystemZ/frame-16.ll =================================================================== --- llvm/trunk/test/CodeGen/SystemZ/frame-16.ll +++ llvm/trunk/test/CodeGen/SystemZ/frame-16.ll @@ -223,15 +223,15 @@ ; CHECK-NOFP-LABEL: f10: ; CHECK-NOFP: stg [[REGISTER:%r[1-9][0-4]?]], [[OFFSET:160|168]](%r15) ; CHECK-NOFP: llilh [[REGISTER]], 8 -; CHECK-NOFP: stc %r3, 0([[REGISTER]],%r15) ; CHECK-NOFP: lg [[REGISTER]], [[OFFSET]](%r15) +; 
CHECK-NOFP: stc %r3, 0([[REGISTER]],%r15) ; CHECK-NOFP: br %r14 ; ; CHECK-FP-LABEL: f10: ; CHECK-FP: stg [[REGISTER:%r[1-9][0-4]?]], [[OFFSET:160|168]](%r11) ; CHECK-FP: llilh [[REGISTER]], 8 -; CHECK-FP: stc %r3, 0([[REGISTER]],%r11) ; CHECK-FP: lg [[REGISTER]], [[OFFSET]](%r11) +; CHECK-FP: stc %r3, 0([[REGISTER]],%r11) ; CHECK-FP: br %r14 %i0 = load volatile i32 , i32 *%vptr %i1 = load volatile i32 , i32 *%vptr @@ -259,8 +259,8 @@ ; CHECK-NOFP: stmg %r6, %r15, ; CHECK-NOFP: stg [[REGISTER:%r[1-9][0-4]?]], [[OFFSET:160|168]](%r15) ; CHECK-NOFP: llilh [[REGISTER]], 8 -; CHECK-NOFP: stc %r3, 0([[REGISTER]],%r15) ; CHECK-NOFP: lg [[REGISTER]], [[OFFSET]](%r15) +; CHECK-NOFP: stc %r3, 0([[REGISTER]],%r15) ; CHECK-NOFP: lmg %r6, %r15, ; CHECK-NOFP: br %r14 ; @@ -268,8 +268,8 @@ ; CHECK-FP: stmg %r6, %r15, ; CHECK-FP: stg [[REGISTER:%r[1-9][0-4]?]], [[OFFSET:160|168]](%r11) ; CHECK-FP: llilh [[REGISTER]], 8 -; CHECK-FP: stc %r3, 8([[REGISTER]],%r11) ; CHECK-FP: lg [[REGISTER]], [[OFFSET]](%r11) +; CHECK-FP: stc %r3, 8([[REGISTER]],%r11) ; CHECK-FP: lmg %r6, %r15, ; CHECK-FP: br %r14 %i0 = load volatile i32 , i32 *%vptr Index: llvm/trunk/test/CodeGen/Thumb/large-stack.ll =================================================================== --- llvm/trunk/test/CodeGen/Thumb/large-stack.ll +++ llvm/trunk/test/CodeGen/Thumb/large-stack.ll @@ -46,8 +46,8 @@ ; CHECK-LABEL: test3: ; CHECK: ldr [[TEMP:r[0-7]]], ; CHECK: add sp, [[TEMP]] -; CHECK: ldr [[TEMP]], -; CHECK: add [[TEMP]], sp +; CHECK: ldr [[TEMP2:r[0-7]]], +; CHECK: add [[TEMP2]], sp ; EABI: ldr [[TEMP:r[0-7]]], ; EABI: add sp, [[TEMP]] ; IOS: subs r4, r7, #4