Index: lib/CodeGen/MachineCombiner.cpp
===================================================================
--- lib/CodeGen/MachineCombiner.cpp
+++ lib/CodeGen/MachineCombiner.cpp
@@ -242,20 +242,19 @@
   // Get depth, latency and slack of Root.
   unsigned RootDepth = BlockTrace.getInstrCycles(Root).Depth;
   unsigned RootLatency = TSchedModel.computeInstrLatency(Root);
-  unsigned RootSlack = BlockTrace.getInstrSlack(Root);
 
   DEBUG(dbgs() << "DEPENDENCE DATA FOR " << Root << "\n";
         dbgs() << " NewRootDepth: " << NewRootDepth
                << " NewRootLatency: " << NewRootLatency << "\n";
         dbgs() << " RootDepth: " << RootDepth << " RootLatency: " << RootLatency
-               << " RootSlack: " << RootSlack << "\n";
+               << "\n";
         dbgs() << " NewRootDepth + NewRootLatency = "
                << NewRootDepth + NewRootLatency << "\n";
-        dbgs() << " RootDepth + RootLatency + RootSlack = "
-               << RootDepth + RootLatency + RootSlack << "\n";);
+        dbgs() << " RootDepth + RootLatency = "
+               << RootDepth + RootLatency << "\n";);
 
   unsigned NewCycleCount = NewRootDepth + NewRootLatency;
-  unsigned OldCycleCount = RootDepth + RootLatency + RootSlack;
+  unsigned OldCycleCount = RootDepth + RootLatency;
 
   if (NewCodeHasLessInsts)
     return NewCycleCount <= OldCycleCount;
Index: test/CodeGen/X86/machine-combiner.ll
===================================================================
--- test/CodeGen/X86/machine-combiner.ll
+++ test/CodeGen/X86/machine-combiner.ll
@@ -632,10 +632,10 @@
 ; AVX-NEXT:    callq bar
 ; AVX-NEXT:    vmovsd %xmm0, (%rsp)
 ; AVX-NEXT:    callq bar
-; AVX-NEXT:    vmovsd (%rsp), %xmm1
-; AVX:         vaddsd 8(%rsp), %xmm1, %xmm1
+; AVX-NEXT:    vmovsd 8(%rsp), %xmm1
+; AVX:         vaddsd 16(%rsp), %xmm1, %xmm1
+; AVX-NEXT:    vaddsd (%rsp), %xmm0, %xmm0
 ; AVX-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vaddsd 16(%rsp), %xmm0, %xmm0
 
   %x0 = call double @bar()
   %x1 = call double @bar()
@@ -656,9 +656,10 @@
 ; AVX-NEXT:    callq bar
 ; AVX-NEXT:    vmovsd %xmm0, (%rsp)
 ; AVX-NEXT:    callq bar
+; AVX-NEXT:    vmovsd 8(%rsp), %xmm1
+; AVX:         vaddsd 16(%rsp), %xmm1, %xmm1
 ; AVX-NEXT:    vaddsd (%rsp), %xmm0, %xmm0
-; AVX-NEXT:    vaddsd 8(%rsp), %xmm0, %xmm0
-; AVX-NEXT:    vaddsd 16(%rsp), %xmm0, %xmm0
+; AVX-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
 
   %x0 = call double @bar()
   %x1 = call double @bar()
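
Note for reviewers: the MachineCombiner hunk above changes the cycle-count comparison so that the old critical path is no longer padded with the root's slack. Below is a minimal standalone sketch of the resulting check; the names (PathInfo, keepsCriticalPath) and the numbers are hypothetical and only mirror the NewCycleCount <= OldCycleCount logic from the patch, they are not LLVM's API.

// Minimal sketch, assuming hypothetical PathInfo/keepsCriticalPath names;
// it only mirrors the NewCycleCount <= OldCycleCount check from the patch.
#include <iostream>

struct PathInfo {
  unsigned Depth;   // cycles before the root instruction can issue
  unsigned Latency; // latency of the root instruction itself
};

// After this patch: a new sequence with fewer instructions is accepted only
// if it does not lengthen the critical path; no slack term is added to the
// old cycle count any more.
bool keepsCriticalPath(const PathInfo &NewRoot, const PathInfo &OldRoot) {
  unsigned NewCycleCount = NewRoot.Depth + NewRoot.Latency;
  unsigned OldCycleCount = OldRoot.Depth + OldRoot.Latency;
  return NewCycleCount <= OldCycleCount;
}

int main() {
  // Accepted: the reassociated root issues earlier with the same latency.
  std::cout << keepsCriticalPath({4, 3}, {5, 3}) << '\n'; // prints 1
  // Rejected: the new root finishes later; previously a nonzero slack on
  // the old root could still have let such a transformation through.
  std::cout << keepsCriticalPath({7, 3}, {5, 3}) << '\n'; // prints 0
}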