Index: llvm/trunk/include/llvm/CodeGen/MachineOperand.h
===================================================================
--- llvm/trunk/include/llvm/CodeGen/MachineOperand.h
+++ llvm/trunk/include/llvm/CodeGen/MachineOperand.h
@@ -239,12 +239,17 @@
   /// called to check this.
   static void printSubregIdx(raw_ostream &OS, uint64_t Index,
                              const TargetRegisterInfo *TRI);
 
+  /// Print operand target flags.
   static void printTargetFlags(raw_ostream& OS, const MachineOperand &Op);
 
   /// Print a MCSymbol as an operand.
   static void printSymbol(raw_ostream &OS, MCSymbol &Sym);
 
+  /// Print a stack object reference.
+  static void printStackObjectReference(raw_ostream &OS, unsigned FrameIndex,
+                                        bool IsFixed, StringRef Name);
+
   /// Print the MachineOperand to \p os.
   /// Providing a valid \p TRI and \p IntrinsicInfo results in a more
   /// target-specific printing. If \p TRI and \p IntrinsicInfo are null, the
Index: llvm/trunk/lib/CodeGen/MIRPrinter.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/MIRPrinter.cpp
+++ llvm/trunk/lib/CodeGen/MIRPrinter.cpp
@@ -758,13 +758,8 @@
   assert(ObjectInfo != StackObjectOperandMapping.end() &&
          "Invalid frame index");
   const FrameIndexOperand &Operand = ObjectInfo->second;
-  if (Operand.IsFixed) {
-    OS << "%fixed-stack." << Operand.ID;
-    return;
-  }
-  OS << "%stack." << Operand.ID;
-  if (!Operand.Name.empty())
-    OS << '.' << Operand.Name;
+  MachineOperand::printStackObjectReference(OS, Operand.ID, Operand.IsFixed,
+                                            Operand.Name);
 }
 
 void MIPrinter::printOffset(int64_t Offset) {
Index: llvm/trunk/lib/CodeGen/MachineOperand.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/MachineOperand.cpp
+++ llvm/trunk/lib/CodeGen/MachineOperand.cpp
@@ -14,6 +14,7 @@
 #include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/Analysis/Loads.h"
 #include "llvm/CodeGen/MIRPrinter.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineJumpTableInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/TargetInstrInfo.h"
@@ -476,6 +477,19 @@
   OS << "<mcsymbol " << Sym << ">";
 }
 
+void MachineOperand::printStackObjectReference(raw_ostream &OS,
+                                               unsigned FrameIndex,
+                                               bool IsFixed, StringRef Name) {
+  if (IsFixed) {
+    OS << "%fixed-stack." << FrameIndex;
+    return;
+  }
+
+  OS << "%stack." << FrameIndex;
+  if (!Name.empty())
+    OS << '.' << Name;
+}
+
 void MachineOperand::print(raw_ostream &OS, const TargetRegisterInfo *TRI,
                            const TargetIntrinsicInfo *IntrinsicInfo) const {
   tryToGetTargetInfo(*this, TRI, IntrinsicInfo);
@@ -574,9 +588,22 @@
   case MachineOperand::MO_MachineBasicBlock:
     OS << printMBBReference(*getMBB());
     break;
-  case MachineOperand::MO_FrameIndex:
-    OS << "<fi#" << getIndex() << '>';
+  case MachineOperand::MO_FrameIndex: {
+    int FrameIndex = getIndex();
+    bool IsFixed = false;
+    StringRef Name;
+    if (const MachineFunction *MF = getMFIfAvailable(*this)) {
+      const MachineFrameInfo &MFI = MF->getFrameInfo();
+      IsFixed = MFI.isFixedObjectIndex(FrameIndex);
+      if (const AllocaInst *Alloca = MFI.getObjectAllocation(FrameIndex))
+        if (Alloca->hasName())
+          Name = Alloca->getName();
+      if (IsFixed)
+        FrameIndex -= MFI.getObjectIndexBegin();
+    }
+    printStackObjectReference(OS, FrameIndex, IsFixed, Name);
     break;
+  }
   case MachineOperand::MO_ConstantPoolIndex:
     OS << "%const." << getIndex();
     printOffset(OS, getOffset());
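For reference, these are the three output forms the new helper produces. This
is a minimal standalone sketch, not part of the patch: it assumes only the
declaration added above and an LLVM build to link against, and the index and
name values are made up for illustration.

  #include "llvm/CodeGen/MachineOperand.h"
  #include "llvm/Support/raw_ostream.h"

  int main() {
    using llvm::MachineOperand;
    // Fixed stack object: only the (already rebased) index is printed;
    // the Name argument is ignored for fixed objects.
    MachineOperand::printStackObjectReference(llvm::outs(), 0, true, "x");
    llvm::outs() << '\n'; // %fixed-stack.0
    // Ordinary stack object with no name.
    MachineOperand::printStackObjectReference(llvm::outs(), 1, false, "");
    llvm::outs() << '\n'; // %stack.1
    // Ordinary stack object backed by a named alloca.
    MachineOperand::printStackObjectReference(llvm::outs(), 2, false, "retval");
    llvm::outs() << '\n'; // %stack.2.retval
    return 0;
  }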
Index: llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2835,7 +2835,7 @@
   // In this case we can still safely fold away the COPY and generate the
   // following spill code:
   //
-  //   STRXui %xzr, <fi#0>
+  //   STRXui %xzr, %stack.0
   //
   // This also eliminates spilled cross register class COPYs (e.g. between x and
   // d regs) of the same size. For example:
@@ -2891,7 +2891,7 @@
   // where the physical register source can be widened and stored to the full
   // virtual reg destination stack slot, in this case producing:
   //
-  //   STRXui %xzr, <fi#0>
+  //   STRXui %xzr, %stack.0
   //
   if (IsSpill && DstMO.isUndef() &&
       TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
@@ -2939,7 +2939,7 @@
   // where we can load the full virtual reg source stack slot, into the subreg
   // destination, in this case producing:
   //
-  //   LDRWui %0:sub_32, <fi#0>
+  //   LDRWui %0:sub_32, %stack.0
   //
   if (IsFill && SrcMO.getSubReg() == 0 && DstMO.isUndef()) {
     const TargetRegisterClass *FillRC;
Index: llvm/trunk/lib/Target/X86/README-SSE.txt
===================================================================
--- llvm/trunk/lib/Target/X86/README-SSE.txt
+++ llvm/trunk/lib/Target/X86/README-SSE.txt
@@ -167,16 +167,16 @@
 
 cond_next140 (0xa910740, LLVM BB @0xa90beb0):
 	%eax = MOV32ri -3
-	%edx = MOV32rm <fi#3>, 1, %noreg, 0
+	%edx = MOV32rm %stack.3, 1, %noreg, 0
 	ADD32rm %eax, %edx, 1, %noreg, 0
-	%edx = MOV32rm <fi#7>, 1, %noreg, 0
+	%edx = MOV32rm %stack.7, 1, %noreg, 0
 	%edx = MOV32rm %edx, 1, %noreg, 40
 	IMUL32rr %eax, %edx
-	%esi = MOV32rm <fi#5>, 1, %noreg, 0
+	%esi = MOV32rm %stack.5, 1, %noreg, 0
 	%esi = MOV32rm %esi, 1, %noreg, 0
-	MOV32mr <fi#4>, 1, %noreg, 0, %esi
+	MOV32mr %stack.4, 1, %noreg, 0, %esi
 	%eax = LEA32r %esi, 1, %eax, -3
-	%esi = MOV32rm <fi#7>, 1, %noreg, 0
+	%esi = MOV32rm %stack.7, 1, %noreg, 0
 	%esi = MOV32rm %esi, 1, %noreg, 32
 	%edi = MOV32rr %eax
 	SHL32ri %edi, 4
Index: llvm/trunk/test/CodeGen/AArch64/tailcall_misched_graph.ll
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/tailcall_misched_graph.ll
+++ llvm/trunk/test/CodeGen/AArch64/tailcall_misched_graph.ll
@@ -26,19 +26,19 @@
 ; CHECK: fi#-2: {{.*}} fixed, at location [SP+8]
 ; CHECK: fi#-1: {{.*}} fixed, at location [SP]
 
-; CHECK: [[VRA:%.*]]:gpr64 = LDRXui <fi#-1>
-; CHECK: [[VRB:%.*]]:gpr64 = LDRXui <fi#-2>
-; CHECK: STRXui %{{.*}}, <fi#-4>
-; CHECK: STRXui [[VRB]], <fi#-3>
+; CHECK: [[VRA:%.*]]:gpr64 = LDRXui %fixed-stack.3
+; CHECK: [[VRB:%.*]]:gpr64 = LDRXui %fixed-stack.2
+; CHECK: STRXui %{{.*}}, %fixed-stack.0
+; CHECK: STRXui [[VRB]], %fixed-stack.1
 
 ; Make sure that there is an dependence edge between fi#-2 and fi#-4.
 ; Without this edge the scheduler would be free to move the store accross the load.
 
-; CHECK: SU({{.*}}): [[VRB]]:gpr64 = LDRXui <fi#-2>
+; CHECK: SU({{.*}}): [[VRB]]:gpr64 = LDRXui %fixed-stack.2
 ; CHECK-NOT: SU
 ; CHECK: Successors:
 ; CHECK: SU([[DEPSTOREB:.*]]): Ord Latency=0
 ; CHECK: SU([[DEPSTOREA:.*]]): Ord Latency=0
 
-; CHECK: SU([[DEPSTOREA]]): STRXui %{{.*}}, <fi#-4>
-; CHECK: SU([[DEPSTOREB]]): STRXui %{{.*}}, <fi#-3>
+; CHECK: SU([[DEPSTOREA]]): STRXui %{{.*}}, %fixed-stack.0
+; CHECK: SU([[DEPSTOREB]]): STRXui %{{.*}}, %fixed-stack.1
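The renumbering in the CHECK lines above follows from the rebasing added to
MachineOperand::print: fixed objects carry negative frame indices, and
subtracting MFI.getObjectIndexBegin() yields the zero-based IDs that MIR uses.
A sketch of the arithmetic, assuming getObjectIndexBegin() == -4 for this test
(four fixed stack objects, fi#-4 through fi#-1; the constant is an assumption,
not something the patch states):

  #include <cstdio>

  int main() {
    const int ObjectIndexBegin = -4; // assumed MFI.getObjectIndexBegin()
    // fi#-4 -> %fixed-stack.0, fi#-3 -> %fixed-stack.1,
    // fi#-2 -> %fixed-stack.2, fi#-1 -> %fixed-stack.3
    for (int FI = ObjectIndexBegin; FI < 0; ++FI)
      std::printf("fi#%d -> %%fixed-stack.%d\n", FI, FI - ObjectIndexBegin);
    return 0;
  }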
Index: llvm/trunk/test/CodeGen/X86/2010-05-12-FastAllocKills.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/2010-05-12-FastAllocKills.ll
+++ llvm/trunk/test/CodeGen/X86/2010-05-12-FastAllocKills.ll
@@ -15,13 +15,13 @@
 ; The X86FP pass needs good kill flags, like on %fp0 representing %reg1034:
 ;%bb.5: derived from LLVM BB %bb10
 ; Predecessors according to CFG: %bb.4 %bb.5
-; %fp0 = LD_Fp80m <fi#3>, 1, %reg0, 0, %reg0; mem:LD10[FixedStack3](align=4)
+; %fp0 = LD_Fp80m %stack.3, 1, %reg0, 0, %reg0; mem:LD10[FixedStack3](align=4)
 ; %fp1 = MOV_Fp8080 killed %fp0
 ; %fp2 = MUL_Fp80m32 %fp1, %rip, 1, %reg0, %const.0, %reg0; mem:LD4[ConstantPool]
 ; %fp0 = MOV_Fp8080 %fp2
-; ST_FpP80m <fi#3>, 1, %reg0, 0, %reg0, killed %fp0; mem:ST10[FixedStack3](align=4)
-; ST_FpP80m <fi#4>, 1, %reg0, 0, %reg0, killed %fp1; mem:ST10[FixedStack4](align=4)
-; ST_FpP80m <fi#5>, 1, %reg0, 0, %reg0, killed %fp2; mem:ST10[FixedStack5](align=4)
+; ST_FpP80m %stack.3, 1, %reg0, 0, %reg0, killed %fp0; mem:ST10[FixedStack3](align=4)
+; ST_FpP80m %stack.4, 1, %reg0, 0, %reg0, killed %fp1; mem:ST10[FixedStack4](align=4)
+; ST_FpP80m %stack.5, 1, %reg0, 0, %reg0, killed %fp2; mem:ST10[FixedStack5](align=4)
 ; FP_REG_KILL implicit-def %fp0, implicit-def %fp1, implicit-def %fp2, implicit-def %fp3, implicit-def %fp4, implicit-def %fp5, implicit-def %fp6
 ; JMP_4 <%bb.5>
 ; Successors according to CFG: %bb.5
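One detail of the new MO_FrameIndex path worth noting: when the operand is not
attached to a MachineFunction (getMFIfAvailable returns null), IsFixed stays
false and the raw, possibly negative, index prints under %stack. Below is a
condensed standalone restatement of that logic; FakeFrameInfo and its members
are hypothetical stand-ins for the MachineFrameInfo queries, not LLVM API.

  #include <cstdio>
  #include <string>

  // Hypothetical stand-in for the MachineFrameInfo queries used above.
  struct FakeFrameInfo {
    int ObjectIndexBegin = -2;                               // first fixed index
    bool isFixedObjectIndex(int FI) const { return FI < 0; }
    std::string allocaName(int FI) const { return FI == 1 ? "buf" : ""; }
  };

  // Mirrors the MO_FrameIndex case plus printStackObjectReference.
  void printFrameIndex(int FrameIndex, const FakeFrameInfo *MFI) {
    bool IsFixed = false;
    std::string Name;
    if (MFI) { // corresponds to getMFIfAvailable() succeeding
      IsFixed = MFI->isFixedObjectIndex(FrameIndex);
      Name = MFI->allocaName(FrameIndex);
      if (IsFixed)
        FrameIndex -= MFI->ObjectIndexBegin; // rebase to a zero-based ID
    }
    if (IsFixed)
      std::printf("%%fixed-stack.%d\n", FrameIndex);
    else if (Name.empty())
      std::printf("%%stack.%d\n", FrameIndex);
    else
      std::printf("%%stack.%d.%s\n", FrameIndex, Name.c_str());
  }

  int main() {
    FakeFrameInfo MFI;
    printFrameIndex(-1, &MFI);    // %fixed-stack.1
    printFrameIndex(1, &MFI);     // %stack.1.buf
    printFrameIndex(1, nullptr);  // no MachineFunction: %stack.1
    printFrameIndex(-1, nullptr); // raw negative index: %stack.-1
    return 0;
  }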