Index: lib/CodeGen/BranchFolding.cpp
===================================================================
--- lib/CodeGen/BranchFolding.cpp
+++ lib/CodeGen/BranchFolding.cpp
@@ -1074,28 +1074,24 @@
   if (!EnableTailMerge) return MadeChange;
 
   // First find blocks with no successors.
-  // Block placement does not create new tail merging opportunities for these
-  // blocks.
-  if (!AfterBlockPlacement) {
-    MergePotentials.clear();
-    for (MachineBasicBlock &MBB : MF) {
-      if (MergePotentials.size() == TailMergeThreshold)
-        break;
-      if (!TriedMerging.count(&MBB) && MBB.succ_empty())
-        MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(MBB), &MBB));
-    }
-
-    // If this is a large problem, avoid visiting the same basic blocks
-    // multiple times.
+  MergePotentials.clear();
+  for (MachineBasicBlock &MBB : MF) {
     if (MergePotentials.size() == TailMergeThreshold)
-      for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
-        TriedMerging.insert(MergePotentials[i].getBlock());
-
-    // See if we can do any tail merging on those.
-    if (MergePotentials.size() >= 2)
-      MadeChange |= TryTailMergeBlocks(nullptr, nullptr, MinCommonTailLength);
+      break;
+    if (!TriedMerging.count(&MBB) && MBB.succ_empty())
+      MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(MBB), &MBB));
   }
 
+  // If this is a large problem, avoid visiting the same basic blocks
+  // multiple times.
+  if (MergePotentials.size() == TailMergeThreshold)
+    for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
+      TriedMerging.insert(MergePotentials[i].getBlock());
+
+  // See if we can do any tail merging on those.
+  if (MergePotentials.size() >= 2)
+    MadeChange |= TryTailMergeBlocks(nullptr, nullptr, MinCommonTailLength);
+
   // Look at blocks (IBB) with multiple predecessors (PBB).
   // We change each predecessor to a canonical form, by
   // (1) temporarily removing any unconditional branch from the predecessor
Index: test/CodeGen/Thumb2/v8_IT_3.ll
===================================================================
--- test/CodeGen/Thumb2/v8_IT_3.ll
+++ test/CodeGen/Thumb2/v8_IT_3.ll
@@ -62,8 +62,7 @@
 ; CHECK-PIC-NEXT: beq
 ; CHECK-PIC: %bb6
 ; CHECK-PIC-NEXT: movs
-; CHECK-PIC-NEXT: add
-; CHECK-PIC-NEXT: pop
+; CHECK-PIC-NEXT: b
   ret i32 0
 
 bb6:
Index: test/CodeGen/WebAssembly/cfg-stackify.ll
===================================================================
--- test/CodeGen/WebAssembly/cfg-stackify.ll
+++ test/CodeGen/WebAssembly/cfg-stackify.ll
@@ -540,13 +540,8 @@
 ; OPT: br 1{{$}}
 ; OPT-NEXT: .LBB13_3:
 ; OPT-NEXT: end_block{{$}}
-; OPT-NEXT: block {{$}}
 ; OPT: br_if 0, $pop{{[0-9]+}}{{$}}
-; OPT: br_if 1, $pop{{[0-9]+}}{{$}}
-; OPT-NEXT: .LBB13_5:
-; OPT-NEXT: end_block{{$}}
-; OPT-NEXT: return{{$}}
-; OPT-NEXT: .LBB13_6:
+; OPT: .LBB13_5:
 ; OPT-NEXT: end_block{{$}}
 ; OPT-NEXT: return{{$}}
 define void @test4(i32 %t) {
Index: test/CodeGen/WinEH/wineh-noret-cleanup.ll
===================================================================
--- test/CodeGen/WinEH/wineh-noret-cleanup.ll
+++ test/CodeGen/WinEH/wineh-noret-cleanup.ll
@@ -68,13 +68,13 @@
 ; SEH-NEXT: .long .Ltmp0@IMGREL+1
 ; SEH-NEXT: .long .Ltmp1@IMGREL+1
 ; SEH-NEXT: .long dummy_filter@IMGREL
-; SEH-NEXT: .long .LBB0_5@IMGREL
+; SEH-NEXT: .long .LBB0_2@IMGREL
 ; SEH-NEXT: .long .Ltmp2@IMGREL+1
 ; SEH-NEXT: .long .Ltmp3@IMGREL+1
-; SEH-NEXT: .long "?dtor$2@?0?test@4HA"@IMGREL
+; SEH-NEXT: .long "?dtor$5@?0?test@4HA"@IMGREL
 ; SEH-NEXT: .long 0
 ; SEH-NEXT: .long .Ltmp2@IMGREL+1
 ; SEH-NEXT: .long .Ltmp3@IMGREL+1
 ; SEH-NEXT: .long dummy_filter@IMGREL
-; SEH-NEXT: .long .LBB0_5@IMGREL
+; SEH-NEXT: .long .LBB0_2@IMGREL
 ; SEH-NEXT: .Llsda_end0:
Index: test/CodeGen/X86/conditional-tailcall.ll
===================================================================
--- test/CodeGen/X86/conditional-tailcall.ll
+++ test/CodeGen/X86/conditional-tailcall.ll
@@ -134,7 +134,7 @@
 
 ; Make sure Machine Copy Propagation doesn't delete the mov to %ecx becaue it
 ; thinks the conditional tail call clobbers it.
-; CHECK64-LABEL: .LBB3_11:
+; CHECK64-LABEL: .LBB3_9:
 ; CHECK64: movzbl (%rdi), %ecx
 ; CHECK64-NEXT: addl $-48, %ecx
 ; CHECK64-NEXT: cmpl $10, %ecx
Index: test/CodeGen/X86/loop-search.ll
===================================================================
--- test/CodeGen/X86/loop-search.ll
+++ test/CodeGen/X86/loop-search.ll
@@ -8,26 +8,21 @@
 ; CHECK-LABEL: search:
 ; CHECK: ## %bb.0: ## %entry
 ; CHECK-NEXT: testl %edx, %edx
-; CHECK-NEXT: jle LBB0_1
-; CHECK-NEXT: ## %bb.4: ## %for.body.preheader
+; CHECK-NEXT: jle LBB0_5
+; CHECK-NEXT: ## %bb.1: ## %for.body.preheader
 ; CHECK-NEXT: movslq %edx, %rax
 ; CHECK-NEXT: xorl %ecx, %ecx
 ; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: LBB0_5: ## %for.body
+; CHECK-NEXT: LBB0_2: ## %for.body
 ; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: cmpl %edi, (%rsi,%rcx,4)
 ; CHECK-NEXT: je LBB0_6
-; CHECK-NEXT: ## %bb.2: ## %for.cond
-; CHECK-NEXT: ## in Loop: Header=BB0_5 Depth=1
+; CHECK-NEXT: ## %bb.3: ## %for.cond
+; CHECK-NEXT: ## in Loop: Header=BB0_2 Depth=1
 ; CHECK-NEXT: incq %rcx
 ; CHECK-NEXT: cmpq %rax, %rcx
-; CHECK-NEXT: jl LBB0_5
-; ### FIXME: %bb.3 and LBB0_1 should be merged
-; CHECK-NEXT: ## %bb.3:
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: ## kill: def $al killed $al killed $eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: LBB0_1:
+; CHECK-NEXT: jl LBB0_2
+; CHECK-NEXT: LBB0_5:
 ; CHECK-NEXT: xorl %eax, %eax
 ; CHECK-NEXT: ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT: retq
Index: test/CodeGen/X86/machine-cp.ll
===================================================================
--- test/CodeGen/X86/machine-cp.ll
+++ test/CodeGen/X86/machine-cp.ll
@@ -8,22 +8,21 @@
 ; CHECK: ## %bb.0: ## %entry
 ; CHECK-NEXT: movl %edi, %eax
 ; CHECK-NEXT: testl %esi, %esi
-; CHECK-NEXT: je LBB0_1
-; CHECK-NEXT: ## %bb.2: ## %while.body.preheader
+; CHECK-NEXT: je LBB0_4
+; CHECK-NEXT: ## %bb.1: ## %while.body.preheader
 ; CHECK-NEXT: movl %esi, %edx
 ; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: LBB0_3: ## %while.body
+; CHECK-NEXT: LBB0_2: ## %while.body
 ; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: movl %edx, %ecx
 ; CHECK-NEXT: cltd
 ; CHECK-NEXT: idivl %ecx
 ; CHECK-NEXT: testl %edx, %edx
 ; CHECK-NEXT: movl %ecx, %eax
-; CHECK-NEXT: jne LBB0_3
-; CHECK-NEXT: ## %bb.4: ## %while.end
+; CHECK-NEXT: jne LBB0_2
+; CHECK-NEXT: ## %bb.3: ## %while.end
 ; CHECK-NEXT: movl %ecx, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: LBB0_1:
+; CHECK-NEXT: LBB0_4:
 ; CHECK-NEXT: retq
 entry:
   %cmp1 = icmp eq i32 %b, 0
@@ -60,22 +59,21 @@
 ; CHECK: ## %bb.0: ## %entry
 ; CHECK-NEXT: movq %rdi, %rax
 ; CHECK-NEXT: testq %rsi, %rsi
-; CHECK-NEXT: je LBB2_1
-; CHECK-NEXT: ## %bb.2: ## %while.body.preheader
+; CHECK-NEXT: je LBB2_4
+; CHECK-NEXT: ## %bb.1: ## %while.body.preheader
 ; CHECK-NEXT: movq %rsi, %rdx
 ; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: LBB2_3: ## %while.body
+; CHECK-NEXT: LBB2_2: ## %while.body
 ; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: movq %rdx, %rcx
 ; CHECK-NEXT: cqto
 ; CHECK-NEXT: idivq %rcx
 ; CHECK-NEXT: testq %rdx, %rdx
 ; CHECK-NEXT: movq %rcx, %rax
-; CHECK-NEXT: jne LBB2_3
-; CHECK-NEXT: ## %bb.4: ## %while.end
+; CHECK-NEXT: jne LBB2_2
+; CHECK-NEXT: ## %bb.3: ## %while.end
 ; CHECK-NEXT: movl %ecx, %eax
-; CHECK-NEXT: retq
-; CHECK-NEXT: LBB2_1:
+; CHECK-NEXT: LBB2_4:
 ; CHECK-NEXT: retq
 entry:
   %cmp1 = icmp eq i64 %b, 0
Index: test/CodeGen/X86/mul-constant-result.ll
===================================================================
--- test/CodeGen/X86/mul-constant-result.ll
+++ test/CodeGen/X86/mul-constant-result.ll
@@ -28,7 +28,7 @@
 ; X86-NEXT: .LBB0_4:
 ; X86-NEXT: decl %ecx
 ; X86-NEXT: cmpl $31, %ecx
-; X86-NEXT: ja .LBB0_39
+; X86-NEXT: ja .LBB0_7
 ; X86-NEXT: # %bb.5:
 ; X86-NEXT: jmpl *.LJTI0_0(,%ecx,4)
 ; X86-NEXT: .LBB0_6:
@@ -36,209 +36,149 @@
 ; X86-NEXT: popl %esi
 ; X86-NEXT: .cfi_def_cfa_offset 4
 ; X86-NEXT: retl
-; X86-NEXT: .LBB0_39:
-; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: .LBB0_40:
-; X86-NEXT: popl %esi
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
 ; X86-NEXT: .LBB0_7:
 ; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: popl %esi
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
+; X86-NEXT: xorl %eax, %eax
 ; X86-NEXT: .LBB0_8:
-; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: shll $2, %eax
-; X86-NEXT: popl %esi
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
-; X86-NEXT: .LBB0_9:
-; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: leal (%eax,%eax,4), %eax
 ; X86-NEXT: popl %esi
 ; X86-NEXT: .cfi_def_cfa_offset 4
 ; X86-NEXT: retl
 ; X86-NEXT: .LBB0_10:
 ; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: addl %eax, %eax
-; X86-NEXT: leal (%eax,%eax,2), %eax
+; X86-NEXT: shll $2, %eax
 ; X86-NEXT: popl %esi
 ; X86-NEXT: .cfi_def_cfa_offset 4
 ; X86-NEXT: retl
-; X86-NEXT: .LBB0_11:
+; X86-NEXT: .LBB0_12:
 ; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: leal (,%eax,8), %ecx
-; X86-NEXT: jmp .LBB0_12
+; X86-NEXT: addl %eax, %eax
+; X86-NEXT: jmp .LBB0_9
 ; X86-NEXT: .LBB0_13:
-; X86-NEXT: shll $3, %eax
-; X86-NEXT: popl %esi
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
+; X86-NEXT: leal (,%eax,8), %ecx
+; X86-NEXT: jmp .LBB0_41
 ; X86-NEXT: .LBB0_14:
-; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: leal (%eax,%eax,8), %eax
-; X86-NEXT: popl %esi
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
-; X86-NEXT: .LBB0_15:
-; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: addl %eax, %eax
-; X86-NEXT: leal (%eax,%eax,4), %eax
+; X86-NEXT: shll $3, %eax
 ; X86-NEXT: popl %esi
 ; X86-NEXT: .cfi_def_cfa_offset 4
 ; X86-NEXT: retl
 ; X86-NEXT: .LBB0_16:
 ; X86-NEXT: .cfi_def_cfa_offset 8
-; X86-NEXT: leal (%eax,%eax,4), %ecx
-; X86-NEXT: leal (%eax,%ecx,2), %eax
-; X86-NEXT: popl %esi
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
+; X86-NEXT: addl %eax, %eax
+; X86-NEXT: jmp .LBB0_11
 ; X86-NEXT: .LBB0_17:
-; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: leal (%eax,%eax,4), %ecx
+; X86-NEXT: jmp .LBB0_18
+; X86-NEXT: .LBB0_19:
 ; X86-NEXT: shll $2, %eax
-; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: popl %esi
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
-; X86-NEXT: .LBB0_18:
-; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: jmp .LBB0_9
+; X86-NEXT: .LBB0_20:
 ; X86-NEXT: leal (%eax,%eax,2), %ecx
-; X86-NEXT: leal (%eax,%ecx,4), %eax
-; X86-NEXT: popl %esi
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
-; X86-NEXT: .LBB0_19:
-; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: jmp .LBB0_21
+; X86-NEXT: .LBB0_22:
 ; X86-NEXT: movl %eax, %ecx
 ; X86-NEXT: shll $4, %ecx
 ; X86-NEXT: subl %eax, %ecx
-; X86-NEXT: jmp .LBB0_12
-; X86-NEXT: .LBB0_21:
+; X86-NEXT: jmp .LBB0_41
+; X86-NEXT: .LBB0_23:
 ; X86-NEXT: leal (%eax,%eax,4), %eax
-; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: popl %esi
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
-; X86-NEXT: .LBB0_22:
-; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: jmp .LBB0_9
+; X86-NEXT: .LBB0_24:
 ; X86-NEXT: shll $4, %eax
 ; X86-NEXT: popl %esi
 ; X86-NEXT: .cfi_def_cfa_offset 4
 ; X86-NEXT: retl
-; X86-NEXT: .LBB0_23:
+; X86-NEXT: .LBB0_25:
 ; X86-NEXT: .cfi_def_cfa_offset 8
 ; X86-NEXT: movl %eax, %ecx
 ; X86-NEXT: shll $4, %ecx
-; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: popl %esi
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
-; X86-NEXT: .LBB0_24:
-; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: jmp .LBB0_26
+; X86-NEXT: .LBB0_27:
 ; X86-NEXT: addl %eax, %eax
+; X86-NEXT: .LBB0_15:
 ; X86-NEXT: leal (%eax,%eax,8), %eax
 ; X86-NEXT: popl %esi
 ; X86-NEXT: .cfi_def_cfa_offset 4
 ; X86-NEXT: retl
-; X86-NEXT: .LBB0_25:
+; X86-NEXT: .LBB0_28:
 ; X86-NEXT: .cfi_def_cfa_offset 8
 ; X86-NEXT: leal (%eax,%eax,8), %ecx
+; X86-NEXT: .LBB0_18:
 ; X86-NEXT: leal (%eax,%ecx,2), %eax
 ; X86-NEXT: popl %esi
 ; X86-NEXT: .cfi_def_cfa_offset 4
 ; X86-NEXT: retl
-; X86-NEXT: .LBB0_26:
+; X86-NEXT: .LBB0_29:
 ; X86-NEXT: .cfi_def_cfa_offset 8
 ; X86-NEXT: shll $2, %eax
-; X86-NEXT: leal (%eax,%eax,4), %eax
-; X86-NEXT: popl %esi
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
-; X86-NEXT: .LBB0_27:
-; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: jmp .LBB0_11
+; X86-NEXT: .LBB0_30:
 ; X86-NEXT: leal (%eax,%eax,4), %ecx
+; X86-NEXT: .LBB0_21:
 ; X86-NEXT: leal (%eax,%ecx,4), %eax
 ; X86-NEXT: popl %esi
 ; X86-NEXT: .cfi_def_cfa_offset 4
 ; X86-NEXT: retl
-; X86-NEXT: .LBB0_28:
+; X86-NEXT: .LBB0_31:
 ; X86-NEXT: .cfi_def_cfa_offset 8
 ; X86-NEXT: leal (%eax,%eax,4), %ecx
 ; X86-NEXT: leal (%eax,%ecx,4), %ecx
-; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: popl %esi
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
-; X86-NEXT: .LBB0_29:
-; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: jmp .LBB0_26
+; X86-NEXT: .LBB0_32:
 ; X86-NEXT: leal (%eax,%eax,2), %ecx
 ; X86-NEXT: shll $3, %ecx
-; X86-NEXT: jmp .LBB0_12
-; X86-NEXT: .LBB0_30:
+; X86-NEXT: jmp .LBB0_41
+; X86-NEXT: .LBB0_33:
 ; X86-NEXT: shll $3, %eax
-; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: popl %esi
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
-; X86-NEXT: .LBB0_31:
-; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: jmp .LBB0_9
+; X86-NEXT: .LBB0_34:
 ; X86-NEXT: leal (%eax,%eax,4), %eax
+; X86-NEXT: .LBB0_11:
 ; X86-NEXT: leal (%eax,%eax,4), %eax
 ; X86-NEXT: popl %esi
 ; X86-NEXT: .cfi_def_cfa_offset 4
 ; X86-NEXT: retl
-; X86-NEXT: .LBB0_32:
+; X86-NEXT: .LBB0_35:
 ; X86-NEXT: .cfi_def_cfa_offset 8
 ; X86-NEXT: leal (%eax,%eax,4), %ecx
 ; X86-NEXT: leal (%ecx,%ecx,4), %ecx
-; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: popl %esi
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
-; X86-NEXT: .LBB0_33:
-; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: jmp .LBB0_26
+; X86-NEXT: .LBB0_36:
 ; X86-NEXT: leal (%eax,%eax,8), %eax
+; X86-NEXT: .LBB0_9:
 ; X86-NEXT: leal (%eax,%eax,2), %eax
 ; X86-NEXT: popl %esi
 ; X86-NEXT: .cfi_def_cfa_offset 4
 ; X86-NEXT: retl
-; X86-NEXT: .LBB0_34:
+; X86-NEXT: .LBB0_37:
 ; X86-NEXT: .cfi_def_cfa_offset 8
 ; X86-NEXT: leal (%eax,%eax,8), %ecx
 ; X86-NEXT: leal (%ecx,%ecx,2), %ecx
-; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: popl %esi
-; X86-NEXT: .cfi_def_cfa_offset 4
-; X86-NEXT: retl
-; X86-NEXT: .LBB0_35:
-; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: jmp .LBB0_26
+; X86-NEXT: .LBB0_38:
 ; X86-NEXT: leal (%eax,%eax,8), %ecx
 ; X86-NEXT: leal (%ecx,%ecx,2), %ecx
 ; X86-NEXT: addl %eax, %ecx
+; X86-NEXT: .LBB0_26:
 ; X86-NEXT: addl %ecx, %eax
 ; X86-NEXT: popl %esi
 ; X86-NEXT: .cfi_def_cfa_offset 4
 ; X86-NEXT: retl
-; X86-NEXT: .LBB0_36:
+; X86-NEXT: .LBB0_39:
 ; X86-NEXT: .cfi_def_cfa_offset 8
 ; X86-NEXT: movl %eax, %ecx
 ; X86-NEXT: shll $5, %ecx
 ; X86-NEXT: subl %eax, %ecx
-; X86-NEXT: jmp .LBB0_12
-; X86-NEXT: .LBB0_37:
+; X86-NEXT: jmp .LBB0_41
+; X86-NEXT: .LBB0_40:
 ; X86-NEXT: movl %eax, %ecx
 ; X86-NEXT: shll $5, %ecx
-; X86-NEXT: .LBB0_12:
+; X86-NEXT: .LBB0_41:
 ; X86-NEXT: subl %eax, %ecx
 ; X86-NEXT: movl %ecx, %eax
 ; X86-NEXT: popl %esi
 ; X86-NEXT: .cfi_def_cfa_offset 4
 ; X86-NEXT: retl
-; X86-NEXT: .LBB0_38:
+; X86-NEXT: .LBB0_42:
 ; X86-NEXT: .cfi_def_cfa_offset 8
 ; X86-NEXT: shll $5, %eax
 ; X86-NEXT: popl %esi
@@ -256,158 +196,149 @@
 ; X64-HSW-NEXT: cmovel %ecx, %eax
 ; X64-HSW-NEXT: addl $-1, %edi
 ; X64-HSW-NEXT: cmpl $31, %edi
-; X64-HSW-NEXT: ja .LBB0_36
+; X64-HSW-NEXT: ja .LBB0_3
 ; X64-HSW-NEXT: # %bb.1:
 ; X64-HSW-NEXT: jmpq *.LJTI0_0(,%rdi,8)
 ; X64-HSW-NEXT: .LBB0_2:
 ; X64-HSW-NEXT: addl %eax, %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_36:
-; X64-HSW-NEXT: xorl %eax, %eax
-; X64-HSW-NEXT: .LBB0_37:
-; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
-; X64-HSW-NEXT: retq
 ; X64-HSW-NEXT: .LBB0_3:
-; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
-; X64-HSW-NEXT: retq
+; X64-HSW-NEXT: xorl %eax, %eax
 ; X64-HSW-NEXT: .LBB0_4:
-; X64-HSW-NEXT: shll $2, %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_5:
-; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
+; X64-HSW-NEXT: .LBB0_6:
+; X64-HSW-NEXT: shll $2, %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_6:
+; X64-HSW-NEXT: .LBB0_8:
 ; X64-HSW-NEXT: addl %eax, %eax
+; X64-HSW-NEXT: .LBB0_5:
 ; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_7:
-; X64-HSW-NEXT: leal (,%rax,8), %ecx
-; X64-HSW-NEXT: jmp .LBB0_8
 ; X64-HSW-NEXT: .LBB0_9:
-; X64-HSW-NEXT: shll $3, %eax
-; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
-; X64-HSW-NEXT: retq
+; X64-HSW-NEXT: leal (,%rax,8), %ecx
+; X64-HSW-NEXT: jmp .LBB0_37
 ; X64-HSW-NEXT: .LBB0_10:
-; X64-HSW-NEXT: leal (%rax,%rax,8), %eax
+; X64-HSW-NEXT: shll $3, %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_11:
+; X64-HSW-NEXT: .LBB0_12:
 ; X64-HSW-NEXT: addl %eax, %eax
+; X64-HSW-NEXT: .LBB0_7:
 ; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_12:
+; X64-HSW-NEXT: .LBB0_13:
 ; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx
 ; X64-HSW-NEXT: leal (%rax,%rcx,2), %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_13:
+; X64-HSW-NEXT: .LBB0_15:
 ; X64-HSW-NEXT: shll $2, %eax
 ; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_14:
+; X64-HSW-NEXT: .LBB0_16:
 ; X64-HSW-NEXT: leal (%rax,%rax,2), %ecx
 ; X64-HSW-NEXT: leal (%rax,%rcx,4), %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_15:
+; X64-HSW-NEXT: .LBB0_18:
 ; X64-HSW-NEXT: movl %eax, %ecx
 ; X64-HSW-NEXT: shll $4, %ecx
 ; X64-HSW-NEXT: subl %eax, %ecx
-; X64-HSW-NEXT: jmp .LBB0_8
-; X64-HSW-NEXT: .LBB0_17:
+; X64-HSW-NEXT: jmp .LBB0_37
+; X64-HSW-NEXT: .LBB0_19:
 ; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
 ; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_18:
+; X64-HSW-NEXT: .LBB0_20:
 ; X64-HSW-NEXT: shll $4, %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_19:
+; X64-HSW-NEXT: .LBB0_21:
 ; X64-HSW-NEXT: movl %eax, %ecx
 ; X64-HSW-NEXT: shll $4, %ecx
-; X64-HSW-NEXT: jmp .LBB0_20
-; X64-HSW-NEXT: .LBB0_21:
+; X64-HSW-NEXT: jmp .LBB0_34
+; X64-HSW-NEXT: .LBB0_22:
 ; X64-HSW-NEXT: addl %eax, %eax
+; X64-HSW-NEXT: .LBB0_11:
 ; X64-HSW-NEXT: leal (%rax,%rax,8), %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_22:
+; X64-HSW-NEXT: .LBB0_23:
 ; X64-HSW-NEXT: leal (%rax,%rax,8), %ecx
 ; X64-HSW-NEXT: leal (%rax,%rcx,2), %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_23:
+; X64-HSW-NEXT: .LBB0_24:
 ; X64-HSW-NEXT: shll $2, %eax
 ; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_24:
+; X64-HSW-NEXT: .LBB0_25:
 ; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx
 ; X64-HSW-NEXT: leal (%rax,%rcx,4), %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_25:
+; X64-HSW-NEXT: .LBB0_26:
 ; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx
 ; X64-HSW-NEXT: leal (%rax,%rcx,4), %ecx
-; X64-HSW-NEXT: jmp .LBB0_20
-; X64-HSW-NEXT: .LBB0_26:
+; X64-HSW-NEXT: jmp .LBB0_34
+; X64-HSW-NEXT: .LBB0_27:
 ; X64-HSW-NEXT: leal (%rax,%rax,2), %ecx
 ; X64-HSW-NEXT: shll $3, %ecx
-; X64-HSW-NEXT: jmp .LBB0_8
-; X64-HSW-NEXT: .LBB0_27:
+; X64-HSW-NEXT: jmp .LBB0_37
+; X64-HSW-NEXT: .LBB0_28:
 ; X64-HSW-NEXT: shll $3, %eax
 ; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_28:
+; X64-HSW-NEXT: .LBB0_29:
 ; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
 ; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_29:
+; X64-HSW-NEXT: .LBB0_30:
 ; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx
 ; X64-HSW-NEXT: leal (%rcx,%rcx,4), %ecx
-; X64-HSW-NEXT: jmp .LBB0_20
-; X64-HSW-NEXT: .LBB0_30:
+; X64-HSW-NEXT: jmp .LBB0_34
+; X64-HSW-NEXT: .LBB0_31:
 ; X64-HSW-NEXT: leal (%rax,%rax,8), %eax
 ; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_31:
+; X64-HSW-NEXT: .LBB0_32:
 ; X64-HSW-NEXT: leal (%rax,%rax,8), %ecx
 ; X64-HSW-NEXT: leal (%rcx,%rcx,2), %ecx
-; X64-HSW-NEXT: jmp .LBB0_20
-; X64-HSW-NEXT: .LBB0_32:
+; X64-HSW-NEXT: jmp .LBB0_34
+; X64-HSW-NEXT: .LBB0_33:
 ; X64-HSW-NEXT: leal (%rax,%rax,8), %ecx
 ; X64-HSW-NEXT: leal (%rcx,%rcx,2), %ecx
 ; X64-HSW-NEXT: addl %eax, %ecx
-; X64-HSW-NEXT: .LBB0_20:
+; X64-HSW-NEXT: .LBB0_34:
 ; X64-HSW-NEXT: addl %eax, %ecx
 ; X64-HSW-NEXT: movl %ecx, %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_33:
+; X64-HSW-NEXT: .LBB0_35:
 ; X64-HSW-NEXT: movl %eax, %ecx
 ; X64-HSW-NEXT: shll $5, %ecx
 ; X64-HSW-NEXT: subl %eax, %ecx
-; X64-HSW-NEXT: jmp .LBB0_8
-; X64-HSW-NEXT: .LBB0_34:
+; X64-HSW-NEXT: jmp .LBB0_37
+; X64-HSW-NEXT: .LBB0_36:
 ; X64-HSW-NEXT: movl %eax, %ecx
 ; X64-HSW-NEXT: shll $5, %ecx
-; X64-HSW-NEXT: .LBB0_8:
+; X64-HSW-NEXT: .LBB0_37:
 ; X64-HSW-NEXT: subl %eax, %ecx
 ; X64-HSW-NEXT: movl %ecx, %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
-; X64-HSW-NEXT: .LBB0_35:
+; X64-HSW-NEXT: .LBB0_39:
 ; X64-HSW-NEXT: shll $5, %eax
 ; X64-HSW-NEXT: # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT: retq
Index: test/CodeGen/X86/tail-merge-after-mbp.mir
===================================================================
--- test/CodeGen/X86/tail-merge-after-mbp.mir
+++ test/CodeGen/X86/tail-merge-after-mbp.mir
@@ -4,28 +4,28 @@
 # check loop bb.7 is not merged with bb.10, bb.13
 # check loop bb.9 is not merged with bb.12
 # CHECK: bb.2:
-# CHECK-NEXT: successors: %bb.3(0x30000000), %bb.4(0x50000000)
+# CHECK-NEXT: successors: %bb.1(0x30000000), %bb.3(0x50000000)
 # CHECK: $rax = MOV64rm $r14, 1, $noreg, 0, $noreg
 # CHECK-NEXT: TEST64rr $rax, $rax
-# CHECK-NEXT: JE_1 %bb.3
-# CHECK: bb.4:
-# CHECK-NEXT: successors: %bb.5(0x30000000), %bb.10(0x50000000)
+# CHECK-NEXT: JE_1 %bb.1
+# CHECK: bb.3:
+# CHECK-NEXT: successors: %bb.4(0x30000000), %bb.8(0x50000000)
 # CHECK: CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0
-# CHECK-NEXT: JNE_1 %bb.10
-# CHECK: bb.5:
-# CHECK-NEXT: successors: %bb.6(0x30000000), %bb.7(0x50000000)
+# CHECK-NEXT: JNE_1 %bb.8
+# CHECK: bb.4:
+# CHECK-NEXT: successors: %bb.1(0x30000000), %bb.5(0x50000000)
 # CHECK: $rax = MOV64rm $r14, 1, $noreg, 0, $noreg
 # CHECK-NEXT: TEST64rr $rax, $rax
-# CHECK-NEXT: JE_1 %bb.6
-# CHECK: bb.7
-# CHECK-NEXT: successors: %bb.8(0x71555555), %bb.10(0x0eaaaaab)
+# CHECK-NEXT: JE_1 %bb.1
+# CHECK: bb.5
+# CHECK-NEXT: successors: %bb.6(0x71555555), %bb.8(0x0eaaaaab)
 # CHECK: CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0
-# CHECK-NEXT: JNE_1 %bb.10
-# CHECK: bb.8:
-# CHECK-NEXT: successors: %bb.9(0x04000000), %bb.7(0x7c000000)
+# CHECK-NEXT: JNE_1 %bb.8
+# CHECK: bb.6:
+# CHECK-NEXT: successors: %bb.1(0x04000000), %bb.5(0x7c000000)
 # CHECK: $rax = MOV64rm $r14, 1, $noreg, 0, $noreg
 # CHECK-NEXT: TEST64rr $rax, $rax
-# CHECK-NEXT: JNE_1 %bb.7
+# CHECK-NEXT: JNE_1 %bb.5
 
 name: foo
 body: |
Index: test/CodeGen/X86/tail-opts.ll
===================================================================
--- test/CodeGen/X86/tail-opts.ll
+++ test/CodeGen/X86/tail-opts.ll
@@ -435,9 +435,8 @@
   ret void
 
 bbx:
-  switch i32 undef, label %bb12 [
-    i32 128, label %return
-  ]
+  %1 = icmp eq i32 undef, 0
+  br i1 %1, label %return, label %bb12
 
 bb12:
   store volatile i32 0, i32* @XYZ
Index: test/CodeGen/X86/tail-threshold.ll
===================================================================
--- test/CodeGen/X86/tail-threshold.ll
+++ test/CodeGen/X86/tail-threshold.ll
@@ -5,7 +5,6 @@
 
 ; CHECK: callq bar
 ; CHECK: callq bar
-; CHECK: callq bar
 ; CHECK-NOT: callq
 
 declare void @bar()
Index: test/CodeGen/X86/test-shrink-bug.ll
===================================================================
--- test/CodeGen/X86/test-shrink-bug.ll
+++ test/CodeGen/X86/test-shrink-bug.ll
@@ -52,15 +52,13 @@
 ; CHECK-X86-NEXT: cmpb $123, {{[0-9]+}}(%esp)
 ; CHECK-X86-NEXT: sete %al
 ; CHECK-X86-NEXT: testl $263, %ecx ## imm = 0x107
-; CHECK-X86-NEXT: je LBB1_2
+; CHECK-X86-NEXT: je LBB1_3
 ; CHECK-X86-NEXT: ## %bb.1:
 ; CHECK-X86-NEXT: testb %al, %al
-; CHECK-X86-NEXT: jne LBB1_2
-; CHECK-X86-NEXT: ## %bb.3: ## %no
+; CHECK-X86-NEXT: jne LBB1_3
+; CHECK-X86-NEXT: ## %bb.2: ## %no
 ; CHECK-X86-NEXT: calll _bar
-; CHECK-X86-NEXT: addl $12, %esp
-; CHECK-X86-NEXT: retl
-; CHECK-X86-NEXT: LBB1_2: ## %yes
+; CHECK-X86-NEXT: LBB1_3: ## %yes
 ; CHECK-X86-NEXT: addl $12, %esp
 ; CHECK-X86-NEXT: retl
 ;
@@ -69,7 +67,7 @@
 ; CHECK-X64-NEXT: pushq %rax
 ; CHECK-X64-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-X64-NEXT: testw $263, %di # imm = 0x107
-; CHECK-X64-NEXT: je .LBB1_2
+; CHECK-X64-NEXT: je .LBB1_3
 ; CHECK-X64-NEXT: # %bb.1:
 ; CHECK-X64-NEXT: pand {{.*}}(%rip), %xmm0
 ; CHECK-X64-NEXT: pcmpeqd {{.*}}(%rip), %xmm0
@@ -77,14 +75,10 @@
 ; CHECK-X64-NEXT: pand %xmm0, %xmm1
 ; CHECK-X64-NEXT: pextrw $4, %xmm1, %eax
 ; CHECK-X64-NEXT: testb $1, %al
-; CHECK-X64-NEXT: jne .LBB1_2
-; CHECK-X64-NEXT: # %bb.3: # %no
+; CHECK-X64-NEXT: jne .LBB1_3
+; CHECK-X64-NEXT: # %bb.2: # %no
 ; CHECK-X64-NEXT: callq bar
-; CHECK-X64-NEXT: popq %rax
-; CHECK-X64-NEXT: .cfi_def_cfa_offset 8
-; CHECK-X64-NEXT: retq
-; CHECK-X64-NEXT: .LBB1_2: # %yes
-; CHECK-X64-NEXT: .cfi_def_cfa_offset 16
+; CHECK-X64-NEXT: .LBB1_3: # %yes
 ; CHECK-X64-NEXT: popq %rax
 ; CHECK-X64-NEXT: .cfi_def_cfa_offset 8
 ; CHECK-X64-NEXT: retq
Index: test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll
===================================================================
--- test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll
+++ test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll
@@ -19,25 +19,22 @@
 ; CHECK-LABEL: test1:
 ; CHECK: // %bb.0: // %entry
 ; CHECK-NEXT: fmov s2, #-7.00000000
-; CHECK-NEXT: cbz x1, .LBB0_5
+; CHECK-NEXT: cbz x1, .LBB0_4
 ; CHECK-NEXT: // %bb.1: // %for.body.preheader
 ; CHECK-NEXT: add x8, x0, #28 // =28
 ; CHECK-NEXT: .LBB0_2: // %for.body
 ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT: ldr s1, [x8, x1, lsl #2]
 ; CHECK-NEXT: fcmp s1, s0
-; CHECK-NEXT: b.gt .LBB0_6
+; CHECK-NEXT: b.gt .LBB0_5
 ; CHECK-NEXT: // %bb.3: // %for.cond
 ; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1
 ; CHECK-NEXT: add x1, x1, #1 // =1
 ; CHECK-NEXT: cbnz x1, .LBB0_2
-; CHECK-NEXT: // %bb.4:
+; CHECK-NEXT: .LBB0_4:
 ; CHECK-NEXT: mov v0.16b, v2.16b
 ; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB0_5:
-; CHECK-NEXT: mov v0.16b, v2.16b
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB0_6: // %cleanup2
+; CHECK-NEXT: .LBB0_5: // %cleanup2
 ; CHECK-NEXT: mov v0.16b, v1.16b
 ; CHECK-NEXT: ret
 entry:
@@ -68,7 +65,7 @@
 ; CHECK-LABEL: test2:
 ; CHECK: // %bb.0: // %entry
 ; CHECK-NEXT: fmov s2, #-7.00000000
-; CHECK-NEXT: cbz x1, .LBB1_5
+; CHECK-NEXT: cbz x1, .LBB1_4
 ; CHECK-NEXT: // %bb.1: // %for.body.preheader
 ; CHECK-NEXT: add x8, x0, #28 // =28
 ; CHECK-NEXT: .LBB1_2: // %for.body
@@ -77,18 +74,15 @@
 ; CHECK-NEXT: scvtf s3, x1
 ; CHECK-NEXT: fadd s3, s3, s0
 ; CHECK-NEXT: fcmp s1, s3
-; CHECK-NEXT: b.gt .LBB1_6
+; CHECK-NEXT: b.gt .LBB1_5
 ; CHECK-NEXT: // %bb.3: // %for.cond
 ; CHECK-NEXT: // in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT: add x1, x1, #1 // =1
 ; CHECK-NEXT: cbnz x1, .LBB1_2
-; CHECK-NEXT: // %bb.4:
-; CHECK-NEXT: mov v0.16b, v2.16b
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB1_5:
+; CHECK-NEXT: .LBB1_4:
 ; CHECK-NEXT: mov v0.16b, v2.16b
 ; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB1_6: // %cleanup4
+; CHECK-NEXT: .LBB1_5: // %cleanup4
 ; CHECK-NEXT: mov v0.16b, v1.16b
 ; CHECK-NEXT: ret
 entry: