Index: llvm/trunk/lib/CodeGen/AggressiveAntiDepBreaker.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/AggressiveAntiDepBreaker.cpp
+++ llvm/trunk/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@@ -448,11 +448,11 @@
   // FIXME: The issue with predicated instruction is more complex. We are being
   // conservatively here because the kill markers cannot be trusted after
   // if-conversion:
-  // %r6 = LDR %sp, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
+  // %r6 = LDR %sp, %reg0, 92, 14, %reg0; mem:LD4[FixedStack14]
   // ...
-  // STR %r0, killed %r6, %reg0, 0, pred:0, pred:%cpsr; mem:ST4[%395]
-  // %r6 = LDR %sp, %reg0, 100, pred:0, pred:%cpsr; mem:LD4[FixedStack12]
-  // STR %r0, killed %r6, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
+  // STR %r0, killed %r6, %reg0, 0, 0, %cpsr; mem:ST4[%395]
+  // %r6 = LDR %sp, %reg0, 100, 0, %cpsr; mem:LD4[FixedStack12]
+  // STR %r0, killed %r6, %reg0, 0, 14, %reg0; mem:ST4[%396](align=8)
   //
   // The first R6 kill is not really a kill since it's killed by a predicated
   // instruction which may not be executed. The second R6 def may or may not
Index: llvm/trunk/lib/CodeGen/CriticalAntiDepBreaker.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/CriticalAntiDepBreaker.cpp
+++ llvm/trunk/lib/CodeGen/CriticalAntiDepBreaker.cpp
@@ -170,11 +170,11 @@
   // FIXME: The issue with predicated instruction is more complex. We are being
   // conservative here because the kill markers cannot be trusted after
   // if-conversion:
-  // %r6 = LDR %sp, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
+  // %r6 = LDR %sp, %reg0, 92, 14, %reg0; mem:LD4[FixedStack14]
   // ...
-  // STR %r0, killed %r6, %reg0, 0, pred:0, pred:%cpsr; mem:ST4[%395]
-  // %r6 = LDR %sp, %reg0, 100, pred:0, pred:%cpsr; mem:LD4[FixedStack12]
-  // STR %r0, killed %r6, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
+  // STR %r0, killed %r6, %reg0, 0, 0, %cpsr; mem:ST4[%395]
+  // %r6 = LDR %sp, %reg0, 100, 0, %cpsr; mem:LD4[FixedStack12]
+  // STR %r0, killed %r6, %reg0, 0, 14, %reg0; mem:ST4[%396](align=8)
   //
   // The first R6 kill is not really a kill since it's killed by a predicated
   // instruction which may not be executed. The second R6 def may or may not
Index: llvm/trunk/lib/CodeGen/MachineInstr.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/MachineInstr.cpp
+++ llvm/trunk/lib/CodeGen/MachineInstr.cpp
@@ -1320,13 +1320,7 @@
     if (FirstOp) FirstOp = false; else OS << ",";
    OS << " ";
 
-    if (i < getDesc().NumOperands) {
-      const MCOperandInfo &MCOI = getDesc().OpInfo[i];
-      if (MCOI.isPredicate())
-        OS << "pred:";
-      if (MCOI.isOptionalDef())
-        OS << "opt:";
-    }
+
     if (isDebugValue() && MO.isMetadata()) {
       // Pretty print DBG_VALUE instructions.
       auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
Index: llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp
===================================================================
--- llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ llvm/trunk/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1659,7 +1659,7 @@
     }
 
     for (unsigned i = 3, e = MI0.getNumOperands(); i != e; ++i) {
-      // %12 = PICLDR %11, 0, pred:14, pred:%noreg
+      // %12 = PICLDR %11, 0, 14, %noreg
       const MachineOperand &MO0 = MI0.getOperand(i);
       const MachineOperand &MO1 = MI1.getOperand(i);
       if (!MO0.isIdenticalTo(MO1))
@@ -3467,8 +3467,8 @@
 }
 
 unsigned ARMBaseInstrInfo::getLDMVariableDefsSize(const MachineInstr &MI) const {
-  // ins GPR:$Rn, pred:$p (2xOp), reglist:$regs, variable_ops
-  // (outs GPR:$wb), (ins GPR:$Rn, pred:$p (2xOp), reglist:$regs, variable_ops)
+  // ins GPR:$Rn, $p (2xOp), reglist:$regs, variable_ops
+  // (outs GPR:$wb), (ins GPR:$Rn, $p (2xOp), reglist:$regs, variable_ops)
   return MI.getNumOperands() + 1 - MI.getDesc().getNumOperands();
 }
Index: llvm/trunk/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll
===================================================================
--- llvm/trunk/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll
+++ llvm/trunk/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll
@@ -9,7 +9,7 @@
 ;
 ; The early-clobber instruction is an str:
 ;
-; early-clobber %12 = t2STR_PRE %6, %12, 32, pred:14, pred:%noreg
+; early-clobber %12 = t2STR_PRE %6, %12, 32, 14, %noreg
 ;
 ; This tests that shrinkToUses handles the EC redef correctly.
Index: llvm/trunk/test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll
===================================================================
--- llvm/trunk/test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll
+++ llvm/trunk/test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll
@@ -4,7 +4,7 @@
 define void @vst(i8* %m, [4 x i64] %v) {
 entry:
 ; CHECK: vst:
-; CHECK: VST1d64Q killed %r{{[0-9]+}}, 8, %d{{[0-9]+}}, pred:14, pred:%noreg, implicit killed %q{{[0-9]+}}_q{{[0-9]+}}
+; CHECK: VST1d64Q killed %r{{[0-9]+}}, 8, %d{{[0-9]+}}, 14, %noreg, implicit killed %q{{[0-9]+}}_q{{[0-9]+}}
 
   %v0 = extractvalue [4 x i64] %v, 0
   %v1 = extractvalue [4 x i64] %v, 1
@@ -37,7 +37,7 @@
 %struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
 define <8 x i8> @vtbx4(<8 x i8>* %A, %struct.__neon_int8x8x4_t* %B, <8 x i8>* %C) nounwind {
 ; CHECK: vtbx4:
-; CHECK: VTBX4 {{.*}}, pred:14, pred:%noreg, implicit %q{{[0-9]+}}_q{{[0-9]+}}
+; CHECK: VTBX4 {{.*}}, 14, %noreg, implicit %q{{[0-9]+}}_q{{[0-9]+}}
   %tmp1 = load <8 x i8>, <8 x i8>* %A
   %tmp2 = load %struct.__neon_int8x8x4_t, %struct.__neon_int8x8x4_t* %B
   %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
Index: llvm/trunk/test/CodeGen/ARM/Windows/vla-cpsr.ll
===================================================================
--- llvm/trunk/test/CodeGen/ARM/Windows/vla-cpsr.ll
+++ llvm/trunk/test/CodeGen/ARM/Windows/vla-cpsr.ll
@@ -9,5 +9,5 @@
   ret void
 }
 
-; CHECK: tBL pred:14, pred:%noreg, $__chkstk, implicit-def %lr, implicit %sp, implicit killed %r4, implicit-def %r4, implicit-def dead %r12, implicit-def dead %cpsr
+; CHECK: tBL 14, %noreg, $__chkstk, implicit-def %lr, implicit %sp, implicit killed %r4, implicit-def %r4, implicit-def dead %r12, implicit-def dead %cpsr
Index: llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir
===================================================================
--- llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir
+++ llvm/trunk/test/CodeGen/ARM/misched-int-basic-thumb2.mir
@@ -42,57 +42,57 @@
 # CHECK_SWIFT: Latency : 2
 # CHECK_R52: Latency : 2
 #
-# CHECK: SU(3): %3:rgpr = t2LDRi12 %2, 0, pred:14, pred:%noreg; mem:LD4[@g1](dereferenceable)
+# CHECK: SU(3): %3:rgpr = t2LDRi12 %2, 0, 14, %noreg; mem:LD4[@g1](dereferenceable)
 # CHECK_A9: Latency : 1
 # CHECK_SWIFT: Latency : 3
 # CHECK_R52: Latency : 4
 #
-# CHECK : SU(6): %6 = t2ADDrr %3, %3, pred:14, pred:%noreg, opt:%noreg
+# CHECK : SU(6): %6 = t2ADDrr %3, %3, 14, %noreg, %noreg
 # CHECK_A9: Latency : 1
 # CHECK_SWIFT: Latency : 1
 # CHECK_R52: Latency : 3
-# CHECK: SU(7): %7:rgpr = t2SDIV %6, %5, pred:14, pred:%noreg
+# CHECK: SU(7): %7:rgpr = t2SDIV %6, %5, 14, %noreg
 # CHECK_A9: Latency : 0
 # CHECK_SWIFT: Latency : 14
 # CHECK_R52: Latency : 8
-# CHECK: SU(8): t2STRi12 %7, %2, 0, pred:14, pred:%noreg; mem:ST4[@g1]
+# CHECK: SU(8): t2STRi12 %7, %2, 0, 14, %noreg; mem:ST4[@g1]
 # CHECK_A9: Latency : 1
 # CHECK_SWIFT: Latency : 0
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(9): %8:rgpr = t2SMULBB %1, %1, pred:14, pred:%noreg
+# CHECK: SU(9): %8:rgpr = t2SMULBB %1, %1, 14, %noreg
 # CHECK_A9: Latency : 2
 # CHECK_SWIFT: Latency : 4
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(10): %9:rgpr = t2SMLABB %0, %0, %8, pred:14, pred:%noreg
+# CHECK: SU(10): %9:rgpr = t2SMLABB %0, %0, %8, 14, %noreg
 # CHECK_A9: Latency : 2
 # CHECK_SWIFT: Latency : 4
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(11): %10:rgpr = t2UXTH %9, 0, pred:14, pred:%noreg
+# CHECK: SU(11): %10:rgpr = t2UXTH %9, 0, 14, %noreg
 # CHECK_A9: Latency : 1
 # CHECK_SWIFT: Latency : 1
 # CHECK_R52: Latency : 3
 #
-# CHECK: SU(12): %11:rgpr = t2MUL %10, %7, pred:14, pred:%noreg
+# CHECK: SU(12): %11:rgpr = t2MUL %10, %7, 14, %noreg
 # CHECK_A9: Latency : 2
 # CHECK_SWIFT: Latency : 4
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(13): %12:rgpr = t2MLA %11, %11, %11, pred:14, pred:%noreg
+# CHECK: SU(13): %12:rgpr = t2MLA %11, %11, %11, 14, %noreg
 # CHECK_A9: Latency : 2
 # CHECK_SWIFT: Latency : 4
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(14): %13:rgpr, %14:rgpr = t2UMULL %12, %12, pred:14, pred:%noreg
+# CHECK: SU(14): %13:rgpr, %14:rgpr = t2UMULL %12, %12, 14, %noreg
 # CHECK_A9: Latency : 3
 # CHECK_SWIFT: Latency : 5
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(18): %19:rgpr, %20:rgpr = t2UMLAL %12, %12, %19, %20, pred:14, pred:%noreg
+# CHECK: SU(18): %19:rgpr, %20:rgpr = t2UMLAL %12, %12, %19, %20, 14, %noreg
 # CHECK_A9: Latency : 3
 # CHECK_SWIFT: Latency : 7
 # CHECK_R52: Latency : 4
Index: llvm/trunk/test/CodeGen/ARM/misched-int-basic.mir
===================================================================
--- llvm/trunk/test/CodeGen/ARM/misched-int-basic.mir
+++ llvm/trunk/test/CodeGen/ARM/misched-int-basic.mir
@@ -28,37 +28,37 @@
 }
 
 # CHECK: ********** MI Scheduling **********
-# CHECK: SU(2): %2:gpr = SMULBB %1, %1, pred:14, pred:%noreg
+# CHECK: SU(2): %2:gpr = SMULBB %1, %1, 14, %noreg
 # CHECK_A9: Latency : 2
 # CHECK_SWIFT: Latency : 4
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(3): %3:gprnopc = SMLABB %0, %0, %2, pred:14, pred:%noreg
+# CHECK: SU(3): %3:gprnopc = SMLABB %0, %0, %2, 14, %noreg
 # CHECK_A9: Latency : 2
 # CHECK_SWIFT: Latency : 4
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(4): %4:gprnopc = UXTH %3, 0, pred:14, pred:%noreg
+# CHECK: SU(4): %4:gprnopc = UXTH %3, 0, 14, %noreg
 # CHECK_A9: Latency : 1
 # CHECK_SWIFT: Latency : 1
 # CHECK_R52: Latency : 3
 #
-# CHECK: SU(5): %5:gprnopc = MUL %4, %4, pred:14, pred:%noreg, opt:%noreg
+# CHECK: SU(5): %5:gprnopc = MUL %4, %4, 14, %noreg, %noreg
 # CHECK_A9: Latency : 2
 # CHECK_SWIFT: Latency : 4
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(6): %6:gprnopc = MLA %5, %5, %5, pred:14, pred:%noreg, opt:%noreg
+# CHECK: SU(6): %6:gprnopc = MLA %5, %5, %5, 14, %noreg, %noreg
 # CHECK_A9: Latency : 2
 # CHECK_SWIFT: Latency : 4
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(7): %7:gprnopc, %8:gprnopc = UMULL %6, %6, pred:14, pred:%noreg, opt:%noreg
+# CHECK: SU(7): %7:gprnopc, %8:gprnopc = UMULL %6, %6, 14, %noreg, %noreg
 # CHECK_A9: Latency : 3
 # CHECK_SWIFT: Latency : 5
 # CHECK_R52: Latency : 4
 #
-# CHECK: SU(11): %13:gpr, %14:gprnopc = UMLAL %6, %6, %13, %14, pred:14, pred:%noreg, opt:%noreg
+# CHECK: SU(11): %13:gpr, %14:gprnopc = UMLAL %6, %6, %13, %14, 14, %noreg, %noreg
 # CHECK_SWIFT: Latency : 7
 # CHECK_A9: Latency : 3
 # CHECK_R52: Latency : 4
Index: llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir
===================================================================
--- llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir
+++ llvm/trunk/test/CodeGen/ARM/single-issue-r52.mir
@@ -20,13 +20,13 @@
 # CHECK: ********** MI Scheduling **********
 # CHECK: ScheduleDAGMILive::schedule starting
-# CHECK: SU(1): %1:qqpr = VLD4d8Pseudo %0, 8, pred:14, pred:%noreg; mem:LD32[%A](align=8)
+# CHECK: SU(1): %1:qqpr = VLD4d8Pseudo %0, 8, 14, %noreg; mem:LD32[%A](align=8)
 # CHECK: Latency : 8
 # CHECK: Single Issue : true;
-# CHECK: SU(2): %4:dpr = VADDv8i8 %1.dsub_0, %1.dsub_1, pred:14, pred:%noreg
+# CHECK: SU(2): %4:dpr = VADDv8i8 %1.dsub_0, %1.dsub_1, 14, %noreg
 # CHECK: Latency : 5
 # CHECK: Single Issue : false;
-# CHECK: SU(3): %5:gpr, %6:gpr = VMOVRRD %4, pred:14, pred:%noreg
+# CHECK: SU(3): %5:gpr, %6:gpr = VMOVRRD %4, 14, %noreg
 # CHECK: Latency : 4
 # CHECK: Single Issue : false;
Index: llvm/trunk/test/CodeGen/ARM/subreg-remat.ll
===================================================================
--- llvm/trunk/test/CodeGen/ARM/subreg-remat.ll
+++ llvm/trunk/test/CodeGen/ARM/subreg-remat.ll
@@ -5,7 +5,7 @@
 ; The vector %v2 is built like this:
 ;
 ; %6:ssub_1 = ...
-; %6:ssub_0 = VLDRS %const.0, 0, pred:14, pred:%noreg; mem:LD4[ConstantPool] DPR_VFP2:%6
+; %6:ssub_0 = VLDRS %const.0, 0, 14, %noreg; mem:LD4[ConstantPool] DPR_VFP2:%6
 ;
 ; When %6 spills, the VLDRS constant pool load cannot be rematerialized
 ; since it implicitly reads the ssub_1 sub-register.
@@ -31,7 +31,7 @@
 ; because the bits are undef, we should rematerialize. The vector is now built
 ; like this:
 ;
-; %2:ssub_0 = VLDRS %const.0, 0, pred:14, pred:%noreg, implicit-def %2; mem:LD4[ConstantPool]
+; %2:ssub_0 = VLDRS %const.0, 0, 14, %noreg, implicit-def %2; mem:LD4[ConstantPool]
 ;
 ; The extra operand indicates that the instruction fully defines the
 ; virtual register. It doesn't read the old value.
Index: llvm/trunk/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll
===================================================================
--- llvm/trunk/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll
+++ llvm/trunk/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll
@@ -5,8 +5,8 @@
 ; This is a case where the coalescer was too eager. These two copies were
 ; considered equivalent and coalescable:
 ;
-; 140 %reg1038:dsub_0 = VMOVD %reg1047:dsub_0, pred:14, pred:%reg0
-; 148 %reg1038:dsub_1 = VMOVD %reg1047:dsub_0, pred:14, pred:%reg0
+; 140 %reg1038:dsub_0 = VMOVD %reg1047:dsub_0, 14, %reg0
+; 148 %reg1038:dsub_1 = VMOVD %reg1047:dsub_0, 14, %reg0
 ;
 ; Only one can be coalesced.