diff --git a/llvm/lib/CodeGen/LiveIntervals.cpp b/llvm/lib/CodeGen/LiveIntervals.cpp
--- a/llvm/lib/CodeGen/LiveIntervals.cpp
+++ b/llvm/lib/CodeGen/LiveIntervals.cpp
@@ -508,7 +508,6 @@
 bool LiveIntervals::computeDeadValues(LiveInterval &LI,
                                       SmallVectorImpl<MachineInstr*> *dead) {
   bool MayHaveSplitComponents = false;
-  bool HaveDeadDef = false;
 
   for (VNInfo *VNI : LI.valnos) {
     if (VNI->isUnused())
@@ -534,21 +533,18 @@
       VNI->markUnused();
       LI.removeSegment(I);
       LLVM_DEBUG(dbgs() << "Dead PHI at " << Def << " may separate interval\n");
-      MayHaveSplitComponents = true;
     } else {
       // This is a dead def. Make sure the instruction knows.
       MachineInstr *MI = getInstructionFromIndex(Def);
       assert(MI && "No instruction defining live value");
       MI->addRegisterDead(LI.reg(), TRI);
-      if (HaveDeadDef)
-        MayHaveSplitComponents = true;
-      HaveDeadDef = true;
 
       if (dead && MI->allDefsAreDead()) {
         LLVM_DEBUG(dbgs() << "All defs dead: " << Def << '\t' << *MI);
         dead->push_back(MI);
       }
     }
+    MayHaveSplitComponents = true;
   }
   return MayHaveSplitComponents;
 }
diff --git a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking-wave32.mir b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking-wave32.mir
--- a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking-wave32.mir
+++ b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking-wave32.mir
@@ -74,7 +74,7 @@
 # GCN: name: negated_cond_vop3_redef_cmp
 # GCN: %0:sgpr_32 = IMPLICIT_DEF
 # GCN-NEXT: %1:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
-# GCN-NEXT: %2:sgpr_32 = V_CMP_NE_U32_e64 %1, 1, implicit $exec
+# GCN-NEXT: dead %3:sgpr_32 = V_CMP_NE_U32_e64 %1, 1, implicit $exec
 # GCN-NEXT: %2:sgpr_32 = COPY $sgpr0
 # GCN-NEXT: $vcc_lo = S_AND_B32 %2, $exec_lo, implicit-def dead $scc
 # GCN-NEXT: S_CBRANCH_VCCNZ %bb.2, implicit $vcc
@@ -161,7 +161,7 @@
 
 # GCN: name: negated_cond_vop3_redef_sel
 # GCN: %0:sgpr_32 = IMPLICIT_DEF
-# GCN-NEXT: %1:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
+# GCN-NEXT: dead %3:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
 # GCN-NEXT: %1:vgpr_32 = COPY $vgpr0
 # GCN-NEXT: %2:sgpr_32 = V_CMP_NE_U32_e64 %1, 1, implicit $exec
 # GCN-NEXT: $vcc_lo = S_AND_B32 %2, $exec_lo, implicit-def dead $scc
diff --git a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking.mir b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking.mir
--- a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking.mir
+++ b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking.mir
@@ -186,7 +186,7 @@
 
 # GCN: name: negated_cond_vop3_redef_sel
 # GCN: %0:sreg_64_xexec = IMPLICIT_DEF
-# GCN-NEXT: %1:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
+# GCN-NEXT: dead %3:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
 # GCN-NEXT: %1:vgpr_32 = COPY $vgpr0
 # GCN-NEXT: %2:sreg_64_xexec = V_CMP_NE_U32_e64 %1, 1, implicit $exec
 # GCN-NEXT: $vcc = S_AND_B64 %2, $exec, implicit-def dead $scc
diff --git a/llvm/test/CodeGen/ARM/cortex-a57-misched-mla.mir b/llvm/test/CodeGen/ARM/cortex-a57-misched-mla.mir
--- a/llvm/test/CodeGen/ARM/cortex-a57-misched-mla.mir
+++ b/llvm/test/CodeGen/ARM/cortex-a57-misched-mla.mir
@@ -11,7 +11,6 @@
 # CHECK-NEXT: Height
 # CHECK-NEXT: Predecessors:
 # CHECK-NEXT: SU({{.*}}): Data Latency=1 Reg=
-# CHECK-NEXT: SU({{.*}}): Out Latency=
 # CHECK-NEXT: SU({{.*}}): Data Latency=1 Reg=
 # CHECK-NEXT: Successors:
 # CHECK-NEXT: SU([[SMLA_SU:[0-9]+]]): Data Latency=1 Reg=%[[RES]]