diff --git a/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
--- a/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -120,22 +120,6 @@
                [](Value *Op) { return Op->getType()->isTokenTy(); }))
       continue;
 
-    // Only look at the block if it is empty or the only other thing in it is a
-    // single PHI node that is the operand to the return.
-    // FIXME: lift this restriction.
-    if (Term != &BB.front()) {
-      // Check for something else in the block.
-      BasicBlock::iterator I(Term);
-      --I;
-      // Skip over debug info.
-      while (isa<DbgInfoIntrinsic>(I) && I != BB.begin())
-        --I;
-      if (!isa<DbgInfoIntrinsic>(I) &&
-          (!isa<PHINode>(I) || I != BB.begin() || Term->getNumOperands() == 0 ||
-           Term->getOperand(0) != &*I))
-        continue;
-    }
-
     // Canonical blocks are uniqued based on the terminator type (opcode).
     Structure[Term->getOpcode()].emplace_back(&BB);
   }
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-condbr-lower-tree.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-condbr-lower-tree.ll
--- a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-condbr-lower-tree.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-condbr-lower-tree.ll
@@ -5,7 +5,7 @@
 define void @or_cond(i32 %X, i32 %Y, i32 %Z) nounwind {
  ; CHECK-LABEL: name: or_cond
  ; CHECK: bb.1.entry:
- ; CHECK: successors: %bb.2(0x20000000), %bb.4(0x60000000)
+ ; CHECK: successors: %bb.3(0x20000000), %bb.4(0x60000000)
  ; CHECK: liveins: $w0, $w1, $w2
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
@@ -16,17 +16,17 @@
  ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s32), [[C1]]
  ; CHECK: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP1]], [[ICMP]]
  ; CHECK: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s32), [[C1]]
- ; CHECK: G_BRCOND [[ICMP2]](s1), %bb.2
+ ; CHECK: G_BRCOND [[ICMP2]](s1), %bb.3
  ; CHECK: G_BR %bb.4
  ; CHECK: bb.4.entry:
- ; CHECK: successors: %bb.2(0x2aaaaaab), %bb.3(0x55555555)
+ ; CHECK: successors: %bb.3(0x2aaaaaab), %bb.2(0x55555555)
  ; CHECK: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[C]]
- ; CHECK: G_BRCOND [[ICMP3]](s1), %bb.2
- ; CHECK: G_BR %bb.3
- ; CHECK: bb.2.cond_true:
- ; CHECK: TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp
- ; CHECK: bb.3.UnifiedReturnBlock:
+ ; CHECK: G_BRCOND [[ICMP3]](s1), %bb.3
+ ; CHECK: G_BR %bb.2
+ ; CHECK: bb.2.common.ret:
  ; CHECK: RET_ReallyLR
+ ; CHECK: bb.3.cond_true:
+ ; CHECK: TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp
 entry:
  %tmp1 = icmp eq i32 %X, 0
  %tmp3 = icmp slt i32 %Y, 5
@@ -44,7 +44,7 @@
 define void @or_cond_select(i32 %X, i32 %Y, i32 %Z) nounwind {
  ; CHECK-LABEL: name: or_cond_select
  ; CHECK: bb.1.entry:
- ; CHECK: successors: %bb.2(0x20000000), %bb.4(0x60000000)
+ ; CHECK: successors: %bb.3(0x20000000), %bb.4(0x60000000)
  ; CHECK: liveins: $w0, $w1, $w2
  ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
@@ -56,17 +56,17 @@
  ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s32), [[C1]]
  ; CHECK: [[SELECT:%[0-9]+]]:_(s1) = G_SELECT [[ICMP1]](s1), [[C2]], [[ICMP]]
  ; CHECK: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s32), [[C1]]
- ; CHECK: G_BRCOND [[ICMP2]](s1), %bb.2
+ ; CHECK: G_BRCOND [[ICMP2]](s1), %bb.3
  ; CHECK: G_BR %bb.4
  ; CHECK: bb.4.entry:
- ; CHECK: successors: %bb.2(0x2aaaaaab), %bb.3(0x55555555)
+ ; CHECK: successors: %bb.3(0x2aaaaaab), %bb.2(0x55555555)
  ; CHECK: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[C]]
- ; CHECK:
G_BRCOND [[ICMP3]](s1), %bb.2 - ; CHECK: G_BR %bb.3 - ; CHECK: bb.2.cond_true: - ; CHECK: TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp - ; CHECK: bb.3.UnifiedReturnBlock: + ; CHECK: G_BRCOND [[ICMP3]](s1), %bb.3 + ; CHECK: G_BR %bb.2 + ; CHECK: bb.2.common.ret: ; CHECK: RET_ReallyLR + ; CHECK: bb.3.cond_true: + ; CHECK: TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp entry: %tmp1 = icmp eq i32 %X, 0 %tmp3 = icmp slt i32 %Y, 5 @@ -84,7 +84,7 @@ define void @and_cond(i32 %X, i32 %Y, i32 %Z) nounwind { ; CHECK-LABEL: name: and_cond ; CHECK: bb.1.entry: - ; CHECK: successors: %bb.4(0x60000000), %bb.3(0x20000000) + ; CHECK: successors: %bb.4(0x60000000), %bb.2(0x20000000) ; CHECK: liveins: $w0, $w1, $w2 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 @@ -96,16 +96,16 @@ ; CHECK: [[AND:%[0-9]+]]:_(s1) = G_AND [[ICMP1]], [[ICMP]] ; CHECK: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s32), [[C1]] ; CHECK: G_BRCOND [[ICMP2]](s1), %bb.4 - ; CHECK: G_BR %bb.3 + ; CHECK: G_BR %bb.2 ; CHECK: bb.4.entry: - ; CHECK: successors: %bb.2(0x55555555), %bb.3(0x2aaaaaab) + ; CHECK: successors: %bb.3(0x55555555), %bb.2(0x2aaaaaab) ; CHECK: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[C]] - ; CHECK: G_BRCOND [[ICMP3]](s1), %bb.2 - ; CHECK: G_BR %bb.3 - ; CHECK: bb.2.cond_true: - ; CHECK: TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp - ; CHECK: bb.3.UnifiedReturnBlock: + ; CHECK: G_BRCOND [[ICMP3]](s1), %bb.3 + ; CHECK: G_BR %bb.2 + ; CHECK: bb.2.common.ret: ; CHECK: RET_ReallyLR + ; CHECK: bb.3.cond_true: + ; CHECK: TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp entry: %tmp1 = icmp eq i32 %X, 0 %tmp3 = icmp slt i32 %Y, 5 @@ -123,7 +123,7 @@ define void @and_cond_select(i32 %X, i32 %Y, i32 %Z) nounwind { ; CHECK-LABEL: name: and_cond_select ; CHECK: bb.1.entry: - ; CHECK: successors: %bb.4(0x60000000), %bb.3(0x20000000) + ; CHECK: successors: %bb.4(0x60000000), %bb.2(0x20000000) ; CHECK: liveins: $w0, $w1, $w2 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 @@ -136,16 +136,16 @@ ; CHECK: [[SELECT:%[0-9]+]]:_(s1) = G_SELECT [[ICMP1]](s1), [[ICMP]], [[C2]] ; CHECK: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s32), [[C1]] ; CHECK: G_BRCOND [[ICMP2]](s1), %bb.4 - ; CHECK: G_BR %bb.3 + ; CHECK: G_BR %bb.2 ; CHECK: bb.4.entry: - ; CHECK: successors: %bb.2(0x55555555), %bb.3(0x2aaaaaab) + ; CHECK: successors: %bb.3(0x55555555), %bb.2(0x2aaaaaab) ; CHECK: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[C]] - ; CHECK: G_BRCOND [[ICMP3]](s1), %bb.2 - ; CHECK: G_BR %bb.3 - ; CHECK: bb.2.cond_true: - ; CHECK: TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp - ; CHECK: bb.3.UnifiedReturnBlock: + ; CHECK: G_BRCOND [[ICMP3]](s1), %bb.3 + ; CHECK: G_BR %bb.2 + ; CHECK: bb.2.common.ret: ; CHECK: RET_ReallyLR + ; CHECK: bb.3.cond_true: + ; CHECK: TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp entry: %tmp1 = icmp eq i32 %X, 0 %tmp3 = icmp slt i32 %Y, 5 @@ -164,7 +164,7 @@ define void @or_cond_same_values_cmp(i32 %X, i32 %Y, i32 %Z) nounwind { ; CHECK-LABEL: name: or_cond_same_values_cmp ; CHECK: bb.1.entry: - ; CHECK: successors: %bb.2(0x40000000), %bb.3(0x40000000) + ; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000) ; CHECK: liveins: $w0, $w1, $w2 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 @@ -173,12 +173,12 @@ ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[C]] ; CHECK: 
[[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY]](s32), [[C]] ; CHECK: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP1]], [[ICMP]] - ; CHECK: G_BRCOND [[OR]](s1), %bb.2 - ; CHECK: G_BR %bb.3 - ; CHECK: bb.2.cond_true: - ; CHECK: TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp - ; CHECK: bb.3.UnifiedReturnBlock: + ; CHECK: G_BRCOND [[OR]](s1), %bb.3 + ; CHECK: G_BR %bb.2 + ; CHECK: bb.2.common.ret: ; CHECK: RET_ReallyLR + ; CHECK: bb.3.cond_true: + ; CHECK: TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp entry: %tmp1 = icmp eq i32 %X, 5 %tmp3 = icmp slt i32 %X, 5 @@ -197,7 +197,7 @@ define void @or_cond_multiple_cases(i32 %X, i32 %Y, i32 %Z) nounwind { ; CHECK-LABEL: name: or_cond_multiple_cases ; CHECK: bb.1.entry: - ; CHECK: successors: %bb.2(0x10000000), %bb.5(0x70000000) + ; CHECK: successors: %bb.3(0x10000000), %bb.5(0x70000000) ; CHECK: liveins: $w0, $w1, $w2 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 @@ -209,22 +209,22 @@ ; CHECK: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP1]], [[ICMP]] ; CHECK: [[OR1:%[0-9]+]]:_(s1) = G_OR [[OR]], [[ICMP2]] ; CHECK: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY]](s32), [[C]] - ; CHECK: G_BRCOND [[ICMP3]](s1), %bb.2 + ; CHECK: G_BRCOND [[ICMP3]](s1), %bb.3 ; CHECK: G_BR %bb.5 ; CHECK: bb.5.entry: - ; CHECK: successors: %bb.2(0x12492492), %bb.4(0x6db6db6e) + ; CHECK: successors: %bb.3(0x12492492), %bb.4(0x6db6db6e) ; CHECK: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[C]] - ; CHECK: G_BRCOND [[ICMP4]](s1), %bb.2 + ; CHECK: G_BRCOND [[ICMP4]](s1), %bb.3 ; CHECK: G_BR %bb.4 ; CHECK: bb.4.entry: - ; CHECK: successors: %bb.2(0x2aaaaaab), %bb.3(0x55555555) + ; CHECK: successors: %bb.3(0x2aaaaaab), %bb.2(0x55555555) ; CHECK: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] - ; CHECK: G_BRCOND [[ICMP5]](s1), %bb.2 - ; CHECK: G_BR %bb.3 - ; CHECK: bb.2.cond_true: - ; CHECK: TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp - ; CHECK: bb.3.UnifiedReturnBlock: + ; CHECK: G_BRCOND [[ICMP5]](s1), %bb.3 + ; CHECK: G_BR %bb.2 + ; CHECK: bb.2.common.ret: ; CHECK: RET_ReallyLR + ; CHECK: bb.3.cond_true: + ; CHECK: TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp entry: %tmp1 = icmp eq i32 %X, 5 %tmp3 = icmp slt i32 %X, 5 @@ -246,7 +246,7 @@ define void @or_cond_ne_null(i32 %X, i32 %Y, i32 %Z) nounwind { ; CHECK-LABEL: name: or_cond_ne_null ; CHECK: bb.1.entry: - ; CHECK: successors: %bb.2(0x40000000), %bb.3(0x40000000) + ; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000) ; CHECK: liveins: $w0, $w1, $w2 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 @@ -255,12 +255,12 @@ ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[C]] ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY1]](s32), [[C]] ; CHECK: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP1]], [[ICMP]] - ; CHECK: G_BRCOND [[OR]](s1), %bb.2 - ; CHECK: G_BR %bb.3 - ; CHECK: bb.2.cond_true: - ; CHECK: TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp - ; CHECK: bb.3.UnifiedReturnBlock: + ; CHECK: G_BRCOND [[OR]](s1), %bb.3 + ; CHECK: G_BR %bb.2 + ; CHECK: bb.2.common.ret: ; CHECK: RET_ReallyLR + ; CHECK: bb.3.cond_true: + ; CHECK: TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp entry: %tmp1 = icmp ne i32 %X, 0 %tmp3 = icmp ne i32 %Y, 0 @@ -281,7 +281,7 @@ define void @unpredictable(i32 %X, i32 %Y, i32 %Z) nounwind { ; CHECK-LABEL: name: unpredictable ; CHECK: bb.1.entry: - ; CHECK: successors: %bb.2(0x40000000), %bb.3(0x40000000) + ; CHECK: successors: 
%bb.3(0x40000000), %bb.2(0x40000000) ; CHECK: liveins: $w0, $w1, $w2 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1 @@ -291,12 +291,12 @@ ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[C]] ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s32), [[C1]] ; CHECK: [[OR:%[0-9]+]]:_(s1) = G_OR [[ICMP1]], [[ICMP]] - ; CHECK: G_BRCOND [[OR]](s1), %bb.2 - ; CHECK: G_BR %bb.3 - ; CHECK: bb.2.cond_true: - ; CHECK: TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp - ; CHECK: bb.3.UnifiedReturnBlock: + ; CHECK: G_BRCOND [[OR]](s1), %bb.3 + ; CHECK: G_BR %bb.2 + ; CHECK: bb.2.common.ret: ; CHECK: RET_ReallyLR + ; CHECK: bb.3.cond_true: + ; CHECK: TCRETURNdi @bar, 0, csr_aarch64_aapcs, implicit $sp entry: %tmp1 = icmp eq i32 %X, 0 %tmp3 = icmp slt i32 %Y, 5 diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/swifterror.ll b/llvm/test/CodeGen/AArch64/GlobalISel/swifterror.ll --- a/llvm/test/CodeGen/AArch64/GlobalISel/swifterror.ll +++ b/llvm/test/CodeGen/AArch64/GlobalISel/swifterror.ll @@ -151,23 +151,28 @@ define float @foo_if(%swift_error** swifterror %error_ptr_ref, i32 %cc) { ; CHECK-LABEL: foo_if: ; CHECK: ; %bb.0: ; %entry -; CHECK-NEXT: cbz w0, LBB3_2 -; CHECK-NEXT: ; %bb.1: ; %gen_error -; CHECK-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill -; CHECK-NEXT: mov x29, sp +; CHECK-NEXT: str d8, [sp, #-32]! ; 8-byte Folded Spill +; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill +; CHECK-NEXT: add x29, sp, #16 ; =16 ; CHECK-NEXT: .cfi_def_cfa w29, 16 ; CHECK-NEXT: .cfi_offset w30, -8 ; CHECK-NEXT: .cfi_offset w29, -16 +; CHECK-NEXT: .cfi_offset b8, -32 +; CHECK-NEXT: cbz w0, LBB3_2 +; CHECK-NEXT: ; %bb.1: ; %gen_error +; CHECK-NEXT: fmov s8, #1.00000000 ; CHECK-NEXT: mov w0, #16 ; CHECK-NEXT: bl _malloc -; CHECK-NEXT: mov w8, #1 -; CHECK-NEXT: fmov s0, #1.00000000 -; CHECK-NEXT: strb w8, [x0, #8] ; CHECK-NEXT: mov x21, x0 -; CHECK-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: LBB3_2: ; %normal -; CHECK-NEXT: movi d0, #0000000000000000 +; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: strb w8, [x0, #8] +; CHECK-NEXT: b LBB3_3 +; CHECK-NEXT: LBB3_2: +; CHECK-NEXT: movi d8, #0000000000000000 +; CHECK-NEXT: LBB3_3: ; %common.ret +; CHECK-NEXT: mov.16b v0, v8 +; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload +; CHECK-NEXT: ldr d8, [sp], #32 ; 8-byte Folded Reload ; CHECK-NEXT: ret entry: %cond = icmp ne i32 %cc, 0 diff --git a/llvm/test/CodeGen/AArch64/addsub.ll b/llvm/test/CodeGen/AArch64/addsub.ll --- a/llvm/test/CodeGen/AArch64/addsub.ll +++ b/llvm/test/CodeGen/AArch64/addsub.ll @@ -177,13 +177,13 @@ ; CHECK-NEXT: b.gt .LBB5_6 ; CHECK-NEXT: // %bb.4: // %test5 ; CHECK-NEXT: add w11, w9, #4 // =4 -; CHECK-NEXT: cmn w10, #444 // =444 +; CHECK-NEXT: cmn w10, #443 // =443 ; CHECK-NEXT: str w11, [x8] -; CHECK-NEXT: b.gt .LBB5_6 +; CHECK-NEXT: b.ge .LBB5_6 ; CHECK-NEXT: // %bb.5: // %test6 ; CHECK-NEXT: add w9, w9, #5 // =5 ; CHECK-NEXT: str w9, [x8] -; CHECK-NEXT: .LBB5_6: // %ret +; CHECK-NEXT: .LBB5_6: // %common.ret ; CHECK-NEXT: ret %val = load i32, i32* @var_i32 %val2 = load i32, i32* @var2_i32 diff --git a/llvm/test/CodeGen/AArch64/branch-relax-alignment.ll b/llvm/test/CodeGen/AArch64/branch-relax-alignment.ll --- a/llvm/test/CodeGen/AArch64/branch-relax-alignment.ll +++ b/llvm/test/CodeGen/AArch64/branch-relax-alignment.ll @@ -6,19 +6,12 @@ define i32 @invert_bcc_block_align_higher_func(i32 %x, i32 %y) align 4 #0 { ; CHECK-LABEL: 
invert_bcc_block_align_higher_func: -; CHECK: ; %bb.0: +; CHECK: ; %bb.0: ; %common.ret ; CHECK-NEXT: cmp w0, w1 -; CHECK-NEXT: b.eq LBB0_1 -; CHECK-NEXT: b LBB0_2 -; CHECK-NEXT: LBB0_1: ; %bb1 -; CHECK-NEXT: mov w8, #42 -; CHECK-NEXT: mov w0, wzr -; CHECK-NEXT: str w8, [x8] -; CHECK-NEXT: ret -; CHECK-NEXT: .p2align 4 -; CHECK-NEXT: LBB0_2: ; %bb2 ; CHECK-NEXT: mov w8, #9 -; CHECK-NEXT: mov w0, #1 +; CHECK-NEXT: mov w9, #42 +; CHECK-NEXT: cset w0, ne +; CHECK-NEXT: csel w8, w9, w8, eq ; CHECK-NEXT: str w8, [x8] ; CHECK-NEXT: ret %1 = icmp eq i32 %x, %y diff --git a/llvm/test/CodeGen/AArch64/branch-relax-asm.ll b/llvm/test/CodeGen/AArch64/branch-relax-asm.ll --- a/llvm/test/CodeGen/AArch64/branch-relax-asm.ll +++ b/llvm/test/CodeGen/AArch64/branch-relax-asm.ll @@ -7,9 +7,11 @@ ; condition. ; CHECK-LABEL: test_asm_length: ; CHECK: ; %bb.0: -; CHECK-NEXT: tbz w0, #0, LBB0_1 -; CHECK-NEXT: b LBB0_2 -; CHECK-NEXT: LBB0_1: ; %true +; CHECK-NEXT: tbz w0, #0, LBB0_2 +; CHECK-NEXT: ; %bb.1: +; CHECK-NEXT: mov w0, wzr +; CHECK-NEXT: ret +; CHECK-NEXT: LBB0_2: ; %true ; CHECK-NEXT: mov w0, #4 ; CHECK-NEXT: ; InlineAsm Start ; CHECK-NEXT: nop @@ -19,9 +21,6 @@ ; CHECK-NEXT: nop ; CHECK-NEXT: nop ; CHECK-NEXT: ; InlineAsm End -; CHECK-NEXT: ret -; CHECK-NEXT: LBB0_2: ; %false -; CHECK-NEXT: mov w0, wzr ; CHECK-NEXT: ret %val = and i32 %in, 1 %tst = icmp eq i32 %val, 0 diff --git a/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll b/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll --- a/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll +++ b/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll @@ -4,25 +4,24 @@ define i32 @invert_bcc(float %x, float %y) #0 { ; CHECK-LABEL: invert_bcc: ; CHECK: ; %bb.0: +; CHECK-NEXT: mov w0, wzr ; CHECK-NEXT: fcmp s0, s1 -; CHECK-NEXT: b.ne LBB0_3 +; CHECK-NEXT: mov w8, #42 +; CHECK-NEXT: b.pl LBB0_3 ; CHECK-NEXT: b LBB0_2 ; CHECK-NEXT: LBB0_3: -; CHECK-NEXT: b.vc LBB0_1 -; CHECK-NEXT: b LBB0_2 -; CHECK-NEXT: LBB0_1: ; %bb2 -; CHECK-NEXT: mov w8, #9 +; CHECK-NEXT: b.gt LBB0_2 +; CHECK-NEXT: ; %bb.1: ; %common.ret +; CHECK-NEXT: str w8, [x8] +; CHECK-NEXT: ret +; CHECK-NEXT: LBB0_2: ; %bb2 ; CHECK-NEXT: mov w0, #1 +; CHECK-NEXT: mov w8, #9 ; CHECK-NEXT: ; InlineAsm Start ; CHECK-NEXT: nop ; CHECK-NEXT: nop ; CHECK-NEXT: ; InlineAsm End ; CHECK-NEXT: str w8, [x8] -; CHECK-NEXT: ret -; CHECK-NEXT: LBB0_2: ; %bb1 -; CHECK-NEXT: mov w8, #42 -; CHECK-NEXT: mov w0, wzr -; CHECK-NEXT: str w8, [x8] ; CHECK-NEXT: ret %1 = fcmp ueq float %x, %y br i1 %1, label %bb1, label %bb2 diff --git a/llvm/test/CodeGen/AArch64/branch-relax-cbz.ll b/llvm/test/CodeGen/AArch64/branch-relax-cbz.ll --- a/llvm/test/CodeGen/AArch64/branch-relax-cbz.ll +++ b/llvm/test/CodeGen/AArch64/branch-relax-cbz.ll @@ -12,7 +12,7 @@ ; CHECK-NEXT: ldr w8, [x8] ; CHECK-NEXT: cbnz w8, LBB0_2 ; CHECK-NEXT: b LBB0_4 -; CHECK-NEXT: LBB0_2: ; %b8 +; CHECK-NEXT: LBB0_2: ; %common.ret ; CHECK-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload ; CHECK-NEXT: ret ; CHECK-NEXT: LBB0_3: ; %b2 diff --git a/llvm/test/CodeGen/AArch64/cgp-usubo.ll b/llvm/test/CodeGen/AArch64/cgp-usubo.ll --- a/llvm/test/CodeGen/AArch64/cgp-usubo.ll +++ b/llvm/test/CodeGen/AArch64/cgp-usubo.ll @@ -126,10 +126,9 @@ ; CHECK-NEXT: tbz w3, #0, .LBB7_2 ; CHECK-NEXT: // %bb.1: // %t ; CHECK-NEXT: subs x8, x0, x1 -; CHECK-NEXT: cset w0, lo +; CHECK-NEXT: cset w3, lo ; CHECK-NEXT: str x8, [x2] -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB7_2: // %f +; CHECK-NEXT: .LBB7_2: // %common.ret ; CHECK-NEXT: and w0, w3, #0x1 ; CHECK-NEXT: ret entry: @@ -151,30 +150,29 @@ define i1 
@usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, i64* %p, i1 %cond) nounwind { ; CHECK-LABEL: usubo_ult_cmp_dominates_i64: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: str x30, [sp, #-48]! // 8-byte Folded Spill +; CHECK-NEXT: stp x30, x23, [sp, #-48]! // 16-byte Folded Spill ; CHECK-NEXT: stp x20, x19, [sp, #32] // 16-byte Folded Spill -; CHECK-NEXT: mov w20, w3 +; CHECK-NEXT: mov w19, w3 ; CHECK-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill ; CHECK-NEXT: tbz w3, #0, .LBB8_3 ; CHECK-NEXT: // %bb.1: // %t ; CHECK-NEXT: cmp x0, x1 -; CHECK-NEXT: mov x22, x0 -; CHECK-NEXT: cset w0, lo -; CHECK-NEXT: mov x19, x2 -; CHECK-NEXT: mov x21, x1 +; CHECK-NEXT: cset w21, lo +; CHECK-NEXT: mov x23, x0 +; CHECK-NEXT: mov w0, w21 +; CHECK-NEXT: mov x20, x2 +; CHECK-NEXT: mov x22, x1 ; CHECK-NEXT: bl call -; CHECK-NEXT: subs x8, x22, x21 +; CHECK-NEXT: subs x8, x23, x22 ; CHECK-NEXT: b.hs .LBB8_3 ; CHECK-NEXT: // %bb.2: // %end -; CHECK-NEXT: cset w0, lo -; CHECK-NEXT: str x8, [x19] -; CHECK-NEXT: b .LBB8_4 -; CHECK-NEXT: .LBB8_3: // %f -; CHECK-NEXT: and w0, w20, #0x1 -; CHECK-NEXT: .LBB8_4: // %f +; CHECK-NEXT: mov w19, w21 +; CHECK-NEXT: str x8, [x20] +; CHECK-NEXT: .LBB8_3: // %common.ret +; CHECK-NEXT: and w0, w19, #0x1 ; CHECK-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload ; CHECK-NEXT: ldp x22, x21, [sp, #16] // 16-byte Folded Reload -; CHECK-NEXT: ldr x30, [sp], #48 // 8-byte Folded Reload +; CHECK-NEXT: ldp x30, x23, [sp], #48 // 16-byte Folded Reload ; CHECK-NEXT: ret entry: br i1 %cond, label %t, label %f diff --git a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll --- a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll +++ b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll @@ -13,13 +13,10 @@ define i32 @f_i8_sign_extend_inreg(i8 %in, i32 %a, i32 %b) nounwind { ; CHECK-LABEL: f_i8_sign_extend_inreg: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: tbnz w0, #7, .LBB0_2 -; CHECK-NEXT: // %bb.1: // %A -; CHECK-NEXT: add w0, w8, w1 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB0_2: // %B -; CHECK-NEXT: add w0, w8, w2 +; CHECK-NEXT: sxtb w8, w0 +; CHECK-NEXT: cmp w8, #0 // =0 +; CHECK-NEXT: csel w8, w1, w2, ge +; CHECK-NEXT: add w0, w8, w0, uxtb ; CHECK-NEXT: ret entry: %cmp = icmp sgt i8 %in, -1 @@ -38,13 +35,10 @@ define i32 @f_i16_sign_extend_inreg(i16 %in, i32 %a, i32 %b) nounwind { ; CHECK-LABEL: f_i16_sign_extend_inreg: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: tbnz w0, #15, .LBB1_2 -; CHECK-NEXT: // %bb.1: // %A -; CHECK-NEXT: add w0, w8, w1 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB1_2: // %B -; CHECK-NEXT: add w0, w8, w2 +; CHECK-NEXT: sxth w8, w0 +; CHECK-NEXT: cmp w8, #0 // =0 +; CHECK-NEXT: csel w8, w1, w2, ge +; CHECK-NEXT: add w0, w8, w0, uxth ; CHECK-NEXT: ret entry: %cmp = icmp sgt i16 %in, -1 @@ -63,13 +57,9 @@ define i64 @f_i32_sign_extend_inreg(i32 %in, i64 %a, i64 %b) nounwind { ; CHECK-LABEL: f_i32_sign_extend_inreg: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: mov w8, w0 -; CHECK-NEXT: tbnz w0, #31, .LBB2_2 -; CHECK-NEXT: // %bb.1: // %A -; CHECK-NEXT: add x0, x8, x1 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB2_2: // %B -; CHECK-NEXT: add x0, x8, x2 +; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: csel x8, x1, x2, ge +; CHECK-NEXT: add x0, x8, w0, uxtw ; CHECK-NEXT: ret entry: %cmp = icmp sgt i32 %in, -1 @@ -88,13 +78,10 @@ define i32 @g_i8_sign_extend_inreg(i8 %in, i32 %a, i32 %b) nounwind { ; CHECK-LABEL: 
g_i8_sign_extend_inreg: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: and w8, w0, #0xff -; CHECK-NEXT: tbnz w0, #7, .LBB3_2 -; CHECK-NEXT: // %bb.1: // %B -; CHECK-NEXT: add w0, w8, w2 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB3_2: // %A -; CHECK-NEXT: add w0, w8, w1 +; CHECK-NEXT: sxtb w8, w0 +; CHECK-NEXT: cmp w8, #0 // =0 +; CHECK-NEXT: csel w8, w1, w2, lt +; CHECK-NEXT: add w0, w8, w0, uxtb ; CHECK-NEXT: ret entry: %cmp = icmp slt i8 %in, 0 @@ -113,13 +100,10 @@ define i32 @g_i16_sign_extend_inreg(i16 %in, i32 %a, i32 %b) nounwind { ; CHECK-LABEL: g_i16_sign_extend_inreg: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: and w8, w0, #0xffff -; CHECK-NEXT: tbnz w0, #15, .LBB4_2 -; CHECK-NEXT: // %bb.1: // %B -; CHECK-NEXT: add w0, w8, w2 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB4_2: // %A -; CHECK-NEXT: add w0, w8, w1 +; CHECK-NEXT: sxth w8, w0 +; CHECK-NEXT: cmp w8, #0 // =0 +; CHECK-NEXT: csel w8, w1, w2, lt +; CHECK-NEXT: add w0, w8, w0, uxth ; CHECK-NEXT: ret entry: %cmp = icmp slt i16 %in, 0 @@ -138,13 +122,9 @@ define i64 @g_i32_sign_extend_inreg(i32 %in, i64 %a, i64 %b) nounwind { ; CHECK-LABEL: g_i32_sign_extend_inreg: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: mov w8, w0 -; CHECK-NEXT: tbnz w0, #31, .LBB5_2 -; CHECK-NEXT: // %bb.1: // %B -; CHECK-NEXT: add x0, x8, x2 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB5_2: // %A -; CHECK-NEXT: add x0, x8, x1 +; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: csel x8, x1, x2, lt +; CHECK-NEXT: add x0, x8, w0, uxtw ; CHECK-NEXT: ret entry: %cmp = icmp slt i32 %in, 0 @@ -163,13 +143,11 @@ define i64 @f_i32_sign_extend_i64(i32 %in, i64 %a, i64 %b) nounwind { ; CHECK-LABEL: f_i32_sign_extend_i64: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: mov w8, w0 -; CHECK-NEXT: tbnz w0, #31, .LBB6_2 -; CHECK-NEXT: // %bb.1: // %A -; CHECK-NEXT: add x0, x8, x1 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB6_2: // %B -; CHECK-NEXT: add x0, x8, x2 +; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 +; CHECK-NEXT: sxtw x8, w0 +; CHECK-NEXT: cmp x8, #0 // =0 +; CHECK-NEXT: csel x8, x1, x2, ge +; CHECK-NEXT: add x0, x8, w0, uxtw ; CHECK-NEXT: ret entry: %inext = sext i32 %in to i64 @@ -189,13 +167,11 @@ define i64 @g_i32_sign_extend_i64(i32 %in, i64 %a, i64 %b) nounwind { ; CHECK-LABEL: g_i32_sign_extend_i64: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: mov w8, w0 -; CHECK-NEXT: tbnz w0, #31, .LBB7_2 -; CHECK-NEXT: // %bb.1: // %B -; CHECK-NEXT: add x0, x8, x2 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB7_2: // %A -; CHECK-NEXT: add x0, x8, x1 +; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 +; CHECK-NEXT: sxtw x8, w0 +; CHECK-NEXT: cmp x8, #0 // =0 +; CHECK-NEXT: csel x8, x1, x2, lt +; CHECK-NEXT: add x0, x8, w0, uxtw ; CHECK-NEXT: ret entry: %inext = sext i32 %in to i64 diff --git a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll --- a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll +++ b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll @@ -716,24 +716,16 @@ define void @cmp_shifted(i32 %in, i32 %lhs, i32 %rhs) { ; CHECK-LABEL: cmp_shifted: -; CHECK: // %bb.0: +; CHECK: // %bb.0: // %common.ret ; CHECK-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: .cfi_offset w30, -16 +; CHECK-NEXT: cmp w0, #0 // =0 +; CHECK-NEXT: mov w8, #42 +; CHECK-NEXT: csinc w8, w8, wzr, gt ; CHECK-NEXT: cmp w0, #2, lsl #12 // =8192 -; CHECK-NEXT: b.lt .LBB10_2 -; CHECK-NEXT: // %bb.1: // %true -; CHECK-NEXT: mov w0, #128 -; CHECK-NEXT: b .LBB10_5 -; CHECK-NEXT: .LBB10_2: // %false -; CHECK-NEXT: cmp w0, #1 // =1 -; CHECK-NEXT: b.lt .LBB10_4 -; CHECK-NEXT: // %bb.3: // %truer -; CHECK-NEXT: mov w0, #42 -; CHECK-NEXT: b .LBB10_5 -; CHECK-NEXT: .LBB10_4: // %falser -; CHECK-NEXT: mov w0, #1 -; CHECK-NEXT: .LBB10_5: // %true +; CHECK-NEXT: mov w9, #128 +; CHECK-NEXT: csel w0, w9, w8, ge ; CHECK-NEXT: bl zoo ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/cond-br-tuning.ll b/llvm/test/CodeGen/AArch64/cond-br-tuning.ll --- a/llvm/test/CodeGen/AArch64/cond-br-tuning.ll +++ b/llvm/test/CodeGen/AArch64/cond-br-tuning.ll @@ -8,14 +8,9 @@ define void @test_add_cbz(i32 %a, i32 %b, i32* %ptr) { ; CHECK-LABEL: test_add_cbz: -; CHECK: // %bb.0: +; CHECK: // %bb.0: // %common.ret ; CHECK-NEXT: cmn w0, w1 -; CHECK-NEXT: b.eq .LBB0_2 -; CHECK-NEXT: // %bb.1: // %L1 -; CHECK-NEXT: str wzr, [x2] -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB0_2: // %L2 -; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: cset w8, eq ; CHECK-NEXT: str w8, [x2] ; CHECK-NEXT: ret %c = add nsw i32 %a, %b @@ -31,13 +26,9 @@ define void @test_add_cbz_multiple_use(i32 %a, i32 %b, i32* %ptr) { ; CHECK-LABEL: test_add_cbz_multiple_use: -; CHECK: // %bb.0: +; CHECK: // %bb.0: // %common.ret ; CHECK-NEXT: adds w8, w0, w1 -; CHECK-NEXT: b.eq .LBB1_2 -; CHECK-NEXT: // %bb.1: // %L1 -; CHECK-NEXT: str wzr, [x2] -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB1_2: // %L2 +; CHECK-NEXT: csel w8, wzr, w8, ne ; CHECK-NEXT: str w8, [x2] ; CHECK-NEXT: ret %c = add nsw i32 %a, %b @@ -53,14 +44,9 @@ define void @test_add_cbz_64(i64 %a, i64 %b, i64* %ptr) { ; CHECK-LABEL: test_add_cbz_64: -; CHECK: // %bb.0: +; CHECK: // %bb.0: // %common.ret ; CHECK-NEXT: cmn x0, x1 -; CHECK-NEXT: b.eq .LBB2_2 -; CHECK-NEXT: // %bb.1: // %L1 -; CHECK-NEXT: str xzr, [x2] -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB2_2: // %L2 -; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: cset w8, eq ; CHECK-NEXT: str x8, [x2] ; CHECK-NEXT: ret %c = add nsw i64 %a, %b @@ -76,14 +62,9 @@ define void @test_and_cbz(i32 %a, i32* %ptr) { ; CHECK-LABEL: test_and_cbz: -; CHECK: // %bb.0: +; CHECK: // %bb.0: // %common.ret ; CHECK-NEXT: tst w0, #0x6 -; CHECK-NEXT: b.eq .LBB3_2 -; CHECK-NEXT: // %bb.1: // %L1 -; CHECK-NEXT: str wzr, [x1] -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB3_2: // %L2 -; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: cset w8, eq ; CHECK-NEXT: str w8, [x1] ; CHECK-NEXT: ret %c = and i32 %a, 6 @@ -99,14 +80,9 @@ define void @test_bic_cbnz(i32 %a, i32 %b, i32* %ptr) { ; CHECK-LABEL: test_bic_cbnz: -; CHECK: // %bb.0: +; CHECK: // %bb.0: // %common.ret ; CHECK-NEXT: bics wzr, w1, w0 -; CHECK-NEXT: b.ne .LBB4_2 -; CHECK-NEXT: // %bb.1: // %L1 -; CHECK-NEXT: str wzr, [x2] -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB4_2: // %L2 -; CHECK-NEXT: mov w8, #1 +; CHECK-NEXT: cset w8, ne ; CHECK-NEXT: str w8, [x2] ; CHECK-NEXT: ret %c = and i32 %a, %b diff --git a/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll b/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll --- a/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll +++ b/llvm/test/CodeGen/AArch64/fast-isel-branch-cond-split.ll @@ -4,11 +4,13 @@ define i64 @test_or(i32 %a, i32 %b) { ; CHECK-LABEL: 
test_or: ; CHECK: ; %bb.0: ; %bb1 -; CHECK-NEXT: cbnz w0, LBB0_2 -; CHECK-NEXT: LBB0_1: ; %bb3 +; CHECK-NEXT: mov w8, w0 ; CHECK-NEXT: mov x0, xzr +; CHECK-NEXT: cbnz w8, LBB0_2 +; CHECK-NEXT: LBB0_1: ; %common.ret ; CHECK-NEXT: ret ; CHECK-NEXT: LBB0_2: ; %bb1.cond.split +; CHECK-NEXT: mov x0, xzr ; CHECK-NEXT: cbz w1, LBB0_1 ; CHECK-NEXT: ; %bb.3: ; %bb4 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill @@ -35,11 +37,13 @@ define i64 @test_or_select(i32 %a, i32 %b) { ; CHECK-LABEL: test_or_select: ; CHECK: ; %bb.0: ; %bb1 -; CHECK-NEXT: cbnz w0, LBB1_2 -; CHECK-NEXT: LBB1_1: ; %bb3 +; CHECK-NEXT: mov w8, w0 ; CHECK-NEXT: mov x0, xzr +; CHECK-NEXT: cbnz w8, LBB1_2 +; CHECK-NEXT: LBB1_1: ; %common.ret ; CHECK-NEXT: ret ; CHECK-NEXT: LBB1_2: ; %bb1.cond.split +; CHECK-NEXT: mov x0, xzr ; CHECK-NEXT: cbz w1, LBB1_1 ; CHECK-NEXT: ; %bb.3: ; %bb4 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill @@ -66,11 +70,13 @@ define i64 @test_and(i32 %a, i32 %b) { ; CHECK-LABEL: test_and: ; CHECK: ; %bb.0: ; %bb1 -; CHECK-NEXT: cbnz w0, LBB2_2 -; CHECK-NEXT: LBB2_1: ; %bb3 +; CHECK-NEXT: mov w8, w0 ; CHECK-NEXT: mov x0, xzr +; CHECK-NEXT: cbnz w8, LBB2_2 +; CHECK-NEXT: LBB2_1: ; %common.ret ; CHECK-NEXT: ret ; CHECK-NEXT: LBB2_2: ; %bb1.cond.split +; CHECK-NEXT: mov x0, xzr ; CHECK-NEXT: cbz w1, LBB2_1 ; CHECK-NEXT: ; %bb.3: ; %bb4 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill @@ -97,11 +103,13 @@ define i64 @test_and_select(i32 %a, i32 %b) { ; CHECK-LABEL: test_and_select: ; CHECK: ; %bb.0: ; %bb1 -; CHECK-NEXT: cbnz w0, LBB3_2 -; CHECK-NEXT: LBB3_1: ; %bb3 +; CHECK-NEXT: mov w8, w0 ; CHECK-NEXT: mov x0, xzr +; CHECK-NEXT: cbnz w8, LBB3_2 +; CHECK-NEXT: LBB3_1: ; %common.ret ; CHECK-NEXT: ret ; CHECK-NEXT: LBB3_2: ; %bb1.cond.split +; CHECK-NEXT: mov x0, xzr ; CHECK-NEXT: cbz w1, LBB3_1 ; CHECK-NEXT: ; %bb.3: ; %bb4 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill @@ -135,6 +143,7 @@ ; CHECK-NEXT: cmp w1, #0 ; =0 ; CHECK-NEXT: cset w9, eq ; CHECK-NEXT: orr w8, w8, w9 +; CHECK-NEXT: mov x0, xzr ; CHECK-NEXT: tbnz w8, #0, LBB4_2 ; CHECK-NEXT: ; %bb.1: ; %bb4 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill @@ -143,9 +152,7 @@ ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: bl _bar ; CHECK-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: LBB4_2: ; %bb3 -; CHECK-NEXT: mov x0, xzr +; CHECK-NEXT: LBB4_2: ; %common.ret ; CHECK-NEXT: ret bb1: %0 = icmp eq i32 %a, 0 @@ -169,6 +176,7 @@ ; CHECK-NEXT: cmp w1, #0 ; =0 ; CHECK-NEXT: cset w9, ne ; CHECK-NEXT: and w8, w8, w9 +; CHECK-NEXT: mov x0, xzr ; CHECK-NEXT: tbz w8, #0, LBB5_2 ; CHECK-NEXT: ; %bb.1: ; %bb4 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
; 16-byte Folded Spill @@ -177,9 +185,7 @@ ; CHECK-NEXT: .cfi_offset w29, -16 ; CHECK-NEXT: bl _bar ; CHECK-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: LBB5_2: ; %bb3 -; CHECK-NEXT: mov x0, xzr +; CHECK-NEXT: LBB5_2: ; %common.ret ; CHECK-NEXT: ret bb1: %0 = icmp ne i32 %a, 0 diff --git a/llvm/test/CodeGen/AArch64/implicit-null-check.ll b/llvm/test/CodeGen/AArch64/implicit-null-check.ll --- a/llvm/test/CodeGen/AArch64/implicit-null-check.ll +++ b/llvm/test/CodeGen/AArch64/implicit-null-check.ll @@ -13,7 +13,7 @@ ; CHECK-NEXT: ldr w0, [x0] // on-fault: .LBB0_2 ; CHECK-NEXT: // %bb.1: // %not_null ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB0_2: // %is_null +; CHECK-NEXT: .LBB0_2: ; CHECK-NEXT: mov w0, #42 ; CHECK-NEXT: ret entry: @@ -36,7 +36,7 @@ ; CHECK-NEXT: ldr w0, [x0] // on-fault: .LBB1_2 ; CHECK-NEXT: // %bb.1: // %not_null ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB1_2: // %is_null +; CHECK-NEXT: .LBB1_2: ; CHECK-NEXT: mov w0, #42 ; CHECK-NEXT: ret entry: @@ -58,7 +58,7 @@ ; CHECK-NEXT: ldr w0, [x0] // on-fault: .LBB2_2 ; CHECK-NEXT: // %bb.1: // %not_null ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB2_2: // %is_null +; CHECK-NEXT: .LBB2_2: ; CHECK-NEXT: mov w0, #42 ; CHECK-NEXT: ret entry: @@ -83,7 +83,7 @@ ; CHECK-NEXT: // %bb.1: // %not_null ; CHECK-NEXT: ldar w0, [x0] ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB3_2: // %is_null +; CHECK-NEXT: .LBB3_2: ; CHECK-NEXT: mov w0, #42 ; CHECK-NEXT: ret entry: @@ -106,7 +106,7 @@ ; CHECK-NEXT: // %bb.1: // %not_null ; CHECK-NEXT: ldr w0, [x0] ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB4_2: // %is_null +; CHECK-NEXT: .LBB4_2: ; CHECK-NEXT: mov w0, #42 ; CHECK-NEXT: ret entry: @@ -129,7 +129,7 @@ ; CHECK-NEXT: ldrb w0, [x0] // on-fault: .LBB5_2 ; CHECK-NEXT: // %bb.1: // %not_null ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB5_2: // %is_null +; CHECK-NEXT: .LBB5_2: ; CHECK-NEXT: mov w0, #42 ; CHECK-NEXT: ret entry: @@ -149,15 +149,14 @@ ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: cbz x0, .LBB6_2 ; CHECK-NEXT: // %bb.1: // %not_null -; CHECK-NEXT: ldp x8, x1, [x0] ; CHECK-NEXT: ldp x2, x3, [x0, #16] -; CHECK-NEXT: mov x0, x8 +; CHECK-NEXT: ldp x0, x1, [x0] ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB6_2: // %is_null -; CHECK-NEXT: mov w0, #42 +; CHECK-NEXT: .LBB6_2: ; CHECK-NEXT: mov x1, xzr ; CHECK-NEXT: mov x2, xzr ; CHECK-NEXT: mov x3, xzr +; CHECK-NEXT: mov w0, #42 ; CHECK-NEXT: ret entry: %c = icmp eq i256* %x, null @@ -180,7 +179,7 @@ ; CHECK-NEXT: ldr w0, [x0, #128] // on-fault: .LBB7_2 ; CHECK-NEXT: // %bb.1: // %not_null ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB7_2: // %is_null +; CHECK-NEXT: .LBB7_2: ; CHECK-NEXT: mov w0, #42 ; CHECK-NEXT: ret entry: @@ -204,7 +203,7 @@ ; CHECK-NEXT: // %bb.1: // %not_null ; CHECK-NEXT: add w0, w8, w1 ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB8_2: // %is_null +; CHECK-NEXT: .LBB8_2: ; CHECK-NEXT: mov w0, #42 ; CHECK-NEXT: ret entry: @@ -231,7 +230,7 @@ ; CHECK-NEXT: udiv w9, w1, w2 ; CHECK-NEXT: add w0, w8, w9 ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB9_2: // %is_null +; CHECK-NEXT: .LBB9_2: ; CHECK-NEXT: mov w0, #42 ; CHECK-NEXT: ret entry: @@ -260,7 +259,7 @@ ; CHECK-NEXT: ldr w0, [x0] ; CHECK-NEXT: str w8, [x2] ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB10_2: // %is_null +; CHECK-NEXT: .LBB10_2: ; CHECK-NEXT: mov w0, #42 ; CHECK-NEXT: ret entry: @@ -287,7 +286,7 @@ ; CHECK-NEXT: add w8, w9, w8 ; CHECK-NEXT: add w0, w8, #4 // =4 ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB11_2: // %is_null +; CHECK-NEXT: .LBB11_2: ; CHECK-NEXT: mov w0, #42 ; CHECK-NEXT: ret entry: @@ -316,7 +315,7 @@ ; CHECK-NEXT: dmb ishld ; CHECK-NEXT: ldr 
w0, [x0] ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB12_2: // %is_null +; CHECK-NEXT: .LBB12_2: ; CHECK-NEXT: mov w0, #42 ; CHECK-NEXT: ret entry: @@ -342,7 +341,7 @@ ; CHECK-NEXT: dmb ish ; CHECK-NEXT: ldr w0, [x0] ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB13_2: // %is_null +; CHECK-NEXT: .LBB13_2: ; CHECK-NEXT: mov w0, #42 ; CHECK-NEXT: ret entry: @@ -366,8 +365,7 @@ ; CHECK-NEXT: // %bb.1: // %not_null ; CHECK-NEXT: mov w8, #1 ; CHECK-NEXT: str w8, [x0] -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB14_2: // %is_null +; CHECK-NEXT: .LBB14_2: // %common.ret ; CHECK-NEXT: ret entry: %c = icmp eq i32* %x, null @@ -389,8 +387,7 @@ ; CHECK-NEXT: // %bb.1: // %not_null ; CHECK-NEXT: mov w8, #1 ; CHECK-NEXT: str w8, [x0] -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB15_2: // %is_null +; CHECK-NEXT: .LBB15_2: // %common.ret ; CHECK-NEXT: ret entry: %c = icmp eq i32* %x, null @@ -411,7 +408,7 @@ ; CHECK-NEXT: ldur w0, [x0, #-128] // on-fault: .LBB16_2 ; CHECK-NEXT: // %bb.1: // %not_null ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB16_2: // %is_null +; CHECK-NEXT: .LBB16_2: ; CHECK-NEXT: mov w0, #42 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll b/llvm/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll --- a/llvm/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll +++ b/llvm/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll @@ -20,7 +20,7 @@ ; CHECK-NEXT: b.ge .LBB0_4 ; CHECK-NEXT: .LBB0_3: // %exit1 ; CHECK-NEXT: str xzr, [x1, #8] -; CHECK-NEXT: .LBB0_4: // %exit2 +; CHECK-NEXT: .LBB0_4: // %common.ret ; CHECK-NEXT: ret entry: br i1 %cond, label %if.then, label %if.else diff --git a/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll b/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll --- a/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll +++ b/llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll @@ -32,7 +32,7 @@ ; CHECK-NEXT: b .LBB0_3 ; CHECK-NEXT: .LBB0_2: // %if.then ; CHECK-NEXT: bl f2 -; CHECK-NEXT: .LBB0_3: // %for.inc +; CHECK-NEXT: .LBB0_3: // %common.ret ; CHECK-NEXT: ldr x30, [sp, #96] // 8-byte Folded Reload ; CHECK-NEXT: mov w0, wzr ; CHECK-NEXT: add sp, sp, #112 // =112 diff --git a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll --- a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll +++ b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll @@ -265,11 +265,12 @@ ; CHECK-NEXT: tst x9, x10, lsl #63 ; CHECK-NEXT: b.lt .LBB2_4 ; CHECK-NEXT: // %bb.2: // %test3 -; CHECK-NEXT: tst x9, x10, asr #12 -; CHECK-NEXT: b.gt .LBB2_4 +; CHECK-NEXT: and x10, x9, x10, asr #12 +; CHECK-NEXT: cmp x10, #1 // =1 +; CHECK-NEXT: b.ge .LBB2_4 ; CHECK-NEXT: // %bb.3: // %other_exit ; CHECK-NEXT: str x9, [x8] -; CHECK-NEXT: .LBB2_4: // %ret +; CHECK-NEXT: .LBB2_4: // %common.ret ; CHECK-NEXT: ret %val1 = load i64, i64* @var1_64 %val2 = load i64, i64* @var2_64 diff --git a/llvm/test/CodeGen/AArch64/optimize-cond-branch.ll b/llvm/test/CodeGen/AArch64/optimize-cond-branch.ll --- a/llvm/test/CodeGen/AArch64/optimize-cond-branch.ll +++ b/llvm/test/CodeGen/AArch64/optimize-cond-branch.ll @@ -13,24 +13,25 @@ define void @func() { ; CHECK-LABEL: func: ; CHECK: // %bb.0: -; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: mov w8, #1 ; CHECK-NEXT: cbnz w8, .LBB0_3 ; CHECK-NEXT: // %bb.1: // %b1 +; CHECK-NEXT: str x30, [sp, #-16]! 
// 8-byte Folded Spill +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: .cfi_offset w30, -16 ; CHECK-NEXT: cbz wzr, .LBB0_4 ; CHECK-NEXT: // %bb.2: // %b3 ; CHECK-NEXT: ldr w8, [x8] -; CHECK-NEXT: tbz w8, #8, .LBB0_5 -; CHECK-NEXT: .LBB0_3: // %b7 +; CHECK-NEXT: and w0, w8, #0x100 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: cbz w0, .LBB0_5 +; CHECK-NEXT: .LBB0_3: // %common.ret.sink.split ; CHECK-NEXT: b extfunc ; CHECK-NEXT: .LBB0_4: // %b2 ; CHECK-NEXT: bl extfunc -; CHECK-NEXT: cbnz w0, .LBB0_3 -; CHECK-NEXT: .LBB0_5: // %b8 ; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: cbnz w0, .LBB0_3 +; CHECK-NEXT: .LBB0_5: // %common.ret ; CHECK-NEXT: ret %c0 = icmp sgt i64 0, 0 br i1 %c0, label %b1, label %b6 diff --git a/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll b/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll --- a/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll +++ b/llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll @@ -147,11 +147,11 @@ ; CHECK-NEXT: mov w19, w0 ; CHECK-NEXT: ldr x0, [sp, #8] ; CHECK-NEXT: bl consume -; CHECK-NEXT: and w0, w19, #0x1 ; CHECK-NEXT: b .LBB8_3 -; CHECK-NEXT: .LBB8_2: // %right -; CHECK-NEXT: mov w0, #1 -; CHECK-NEXT: .LBB8_3: // %right +; CHECK-NEXT: .LBB8_2: +; CHECK-NEXT: mov w19, #1 +; CHECK-NEXT: .LBB8_3: // %common.ret +; CHECK-NEXT: and w0, w19, #0x1 ; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload ; CHECK-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/sve-breakdown-scalable-vectortype.ll b/llvm/test/CodeGen/AArch64/sve-breakdown-scalable-vectortype.ll --- a/llvm/test/CodeGen/AArch64/sve-breakdown-scalable-vectortype.ll +++ b/llvm/test/CodeGen/AArch64/sve-breakdown-scalable-vectortype.ll @@ -13,6 +13,7 @@ define @wide_32i8(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_32i8: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB0_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill @@ -45,7 +46,13 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #2 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -76,9 +83,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB0_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB0_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: ret br i1 %b, label %L1, label %L2 @@ -92,6 +97,7 @@ define @wide_16i16(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_16i16: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB1_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill @@ -124,7 +130,13 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #2 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -155,9 +167,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB1_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB1_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: ret br i1 %b, label %L1, label %L2 @@ -171,6 +181,7 @@ define @wide_8i32(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_8i32: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB2_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill @@ -203,7 +214,13 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #2 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -234,9 +251,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB2_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB2_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: ret br i1 %b, label %L1, label %L2 @@ -250,6 +265,7 @@ define @wide_4i64(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_4i64: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB3_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill @@ -282,7 +298,13 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #2 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -313,9 +335,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB3_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB3_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: ret br i1 %b, label %L1, label %L2 @@ -329,6 +349,7 @@ define @wide_16f16(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_16f16: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB4_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill @@ -361,7 +382,13 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #2 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -392,9 +419,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB4_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB4_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: ret br i1 %b, label %L1, label %L2 @@ -408,6 +433,7 @@ define @wide_8f32(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_8f32: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB5_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill @@ -440,7 +466,13 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #2 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -471,9 +503,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB5_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB5_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: ret br i1 %b, label %L1, label %L2 @@ -487,6 +517,7 @@ define @wide_4f64(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_4f64: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB6_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill @@ -519,7 +550,13 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-2 +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #2 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -550,9 +587,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB6_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB6_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: ret br i1 %b, label %L1, label %L2 @@ -570,6 +605,7 @@ define @wide_48i8(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_48i8: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB7_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill @@ -602,7 +638,15 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-3 +; CHECK-NEXT: str z3, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z3, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #3 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -633,9 +677,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB7_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB7_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: mov z2.d, z3.d ; CHECK-NEXT: ret @@ -650,6 +692,7 @@ define @wide_24i16(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_24i16: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB8_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill @@ -682,7 +725,15 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-3 +; CHECK-NEXT: str z3, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z3, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #3 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -713,9 +764,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB8_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB8_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: mov z2.d, z3.d ; CHECK-NEXT: ret @@ -730,6 +779,7 @@ define @wide_12i32(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_12i32: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB9_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill @@ -762,7 +812,15 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-3 +; CHECK-NEXT: str z3, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z3, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #3 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -793,9 +851,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB9_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB9_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: mov z2.d, z3.d ; CHECK-NEXT: ret @@ -810,6 +866,7 @@ define @wide_6i64(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_6i64: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB10_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill @@ -842,7 +899,15 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-3 +; CHECK-NEXT: str z3, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z3, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #3 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -873,9 +938,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB10_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB10_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: mov z2.d, z3.d ; CHECK-NEXT: ret @@ -890,6 +953,7 @@ define @wide_24f16(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_24f16: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB11_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill @@ -922,7 +986,15 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-3 +; CHECK-NEXT: str z3, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z3, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #3 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -953,9 +1025,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB11_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB11_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: mov z2.d, z3.d ; CHECK-NEXT: ret @@ -970,6 +1040,7 @@ define @wide_12f32(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_12f32: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB12_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill @@ -1002,7 +1073,15 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-3 +; CHECK-NEXT: str z3, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z3, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #3 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -1033,9 +1112,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB12_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB12_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: mov z2.d, z3.d ; CHECK-NEXT: ret @@ -1050,6 +1127,7 @@ define @wide_6f64(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_6f64: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB13_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill @@ -1082,7 +1160,15 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-3 +; CHECK-NEXT: str z3, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z3, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #3 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -1113,9 +1199,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB13_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB13_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: mov z2.d, z3.d ; CHECK-NEXT: ret @@ -1134,6 +1218,7 @@ define @wide_64i8(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_64i8: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB14_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill @@ -1166,7 +1251,17 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-4 +; CHECK-NEXT: str z4, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z3, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z3, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z4, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #4 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -1197,9 +1292,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB14_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB14_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: mov z2.d, z3.d ; CHECK-NEXT: mov z3.d, z4.d @@ -1215,6 +1308,7 @@ define @wide_32i16(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_32i16: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB15_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill @@ -1247,7 +1341,17 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-4 +; CHECK-NEXT: str z4, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z3, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z3, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z4, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #4 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -1278,9 +1382,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB15_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB15_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: mov z2.d, z3.d ; CHECK-NEXT: mov z3.d, z4.d @@ -1296,6 +1398,7 @@ define @wide_16i32(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_16i32: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB16_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill @@ -1328,7 +1431,17 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-4 +; CHECK-NEXT: str z4, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z3, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z3, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z4, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #4 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -1359,9 +1472,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB16_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB16_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: mov z2.d, z3.d ; CHECK-NEXT: mov z3.d, z4.d @@ -1377,6 +1488,7 @@ define @wide_8i64(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_8i64: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB17_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill @@ -1409,7 +1521,17 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-4 +; CHECK-NEXT: str z4, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z3, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z3, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z4, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #4 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -1440,9 +1562,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB17_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB17_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: mov z2.d, z3.d ; CHECK-NEXT: mov z3.d, z4.d @@ -1458,6 +1578,7 @@ define @wide_32f16(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_32f16: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB18_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill @@ -1490,7 +1611,17 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-4 +; CHECK-NEXT: str z4, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z3, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z3, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z4, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #4 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -1521,9 +1652,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB18_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB18_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: mov z2.d, z3.d ; CHECK-NEXT: mov z3.d, z4.d @@ -1539,6 +1668,7 @@ define @wide_16f32(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_16f32: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB19_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! 
// 16-byte Folded Spill @@ -1571,7 +1701,17 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-4 +; CHECK-NEXT: str z4, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z3, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z3, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z4, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #4 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -1602,9 +1742,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB19_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB19_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: mov z2.d, z3.d ; CHECK-NEXT: mov z3.d, z4.d @@ -1620,6 +1758,7 @@ define @wide_8f64(i1 %b, %legal, %illegal) nounwind { ; CHECK-LABEL: wide_8f64: ; CHECK: // %bb.0: +; CHECK-NEXT: mov z0.d, z1.d ; CHECK-NEXT: tbz w0, #0, .LBB20_2 ; CHECK-NEXT: // %bb.1: // %L1 ; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill @@ -1652,7 +1791,17 @@ ; CHECK-NEXT: str z10, [sp, #15, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z9, [sp, #16, mul vl] // 16-byte Folded Spill ; CHECK-NEXT: str z8, [sp, #17, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: addvl sp, sp, #-4 +; CHECK-NEXT: str z4, [sp, #3, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z3, [sp, #2, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z2, [sp, #1, mul vl] // 16-byte Folded Spill +; CHECK-NEXT: str z0, [sp] // 16-byte Folded Spill ; CHECK-NEXT: bl bar +; CHECK-NEXT: ldr z0, [sp] // 16-byte Folded Reload +; CHECK-NEXT: ldr z2, [sp, #1, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z3, [sp, #2, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: ldr z4, [sp, #3, mul vl] // 16-byte Folded Reload +; CHECK-NEXT: addvl sp, sp, #4 ; CHECK-NEXT: ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload ; CHECK-NEXT: ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload @@ -1683,9 +1832,7 @@ ; CHECK-NEXT: ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload ; CHECK-NEXT: addvl sp, sp, #18 ; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB20_2: // %L2 -; CHECK-NEXT: mov z0.d, z1.d +; CHECK-NEXT: .LBB20_2: // %common.ret ; CHECK-NEXT: mov z1.d, z2.d ; CHECK-NEXT: mov z2.d, z3.d ; CHECK-NEXT: mov z3.d, z4.d diff --git a/llvm/test/CodeGen/AArch64/swifterror.ll b/llvm/test/CodeGen/AArch64/swifterror.ll --- a/llvm/test/CodeGen/AArch64/swifterror.ll +++ b/llvm/test/CodeGen/AArch64/swifterror.ll @@ -426,13 +426,13 @@ ; CHECK-APPLE-NEXT: ; %bb.1: ; %gen_error ; CHECK-APPLE-NEXT: mov w0, #16 ; CHECK-APPLE-NEXT: bl _malloc -; CHECK-APPLE-NEXT: mov w8, #1 -; CHECK-APPLE-NEXT: fmov s0, #1.00000000 -; CHECK-APPLE-NEXT: strb w8, [x0, #8] ; CHECK-APPLE-NEXT: mov x21, x0 +; CHECK-APPLE-NEXT: mov w8, #1 +; 
CHECK-APPLE-NEXT: strb w8, [x0, #8] +; CHECK-APPLE-NEXT: fmov s0, #1.00000000 ; CHECK-APPLE-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload ; CHECK-APPLE-NEXT: ret -; CHECK-APPLE-NEXT: LBB3_2: ; %normal +; CHECK-APPLE-NEXT: LBB3_2: ; CHECK-APPLE-NEXT: movi d0, #0000000000000000 ; CHECK-APPLE-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload ; CHECK-APPLE-NEXT: ret diff --git a/llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll b/llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll --- a/llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll +++ b/llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll @@ -19,13 +19,9 @@ ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl x8, x0, x1 ; CHECK-NEXT: cmn x8, #2 // =2 -; CHECK-NEXT: b.le .LBB0_2 -; CHECK-NEXT: // %bb.1: // %return -; CHECK-NEXT: mov x0, x1 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB0_2: // %if.end -; CHECK-NEXT: csinc x8, x1, xzr, ge -; CHECK-NEXT: mul x0, x8, x0 +; CHECK-NEXT: csinc x8, x1, xzr, eq +; CHECK-NEXT: mul x8, x8, x0 +; CHECK-NEXT: csel x0, x1, x8, gt ; CHECK-NEXT: ret entry: %shl = shl i64 %a, %b @@ -46,14 +42,11 @@ ; CHECK-LABEL: ll_a_op_b__1: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl x8, x0, x1 -; CHECK-NEXT: tbnz x8, #63, .LBB1_2 -; CHECK-NEXT: // %bb.1: // %return -; CHECK-NEXT: mov x0, x1 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB1_2: // %if.end ; CHECK-NEXT: cmn x8, #1 // =1 -; CHECK-NEXT: csinc x8, x1, xzr, ge -; CHECK-NEXT: mul x0, x8, x0 +; CHECK-NEXT: csinc x9, x1, xzr, eq +; CHECK-NEXT: mul x9, x9, x0 +; CHECK-NEXT: cmp x8, #0 // =0 +; CHECK-NEXT: csel x0, x1, x9, ge ; CHECK-NEXT: ret entry: %shl = shl i64 %a, %b @@ -75,13 +68,9 @@ ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl x8, x0, x1 ; CHECK-NEXT: cmp x8, #0 // =0 -; CHECK-NEXT: b.le .LBB2_2 -; CHECK-NEXT: // %bb.1: // %return -; CHECK-NEXT: mov x0, x1 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB2_2: // %if.end -; CHECK-NEXT: csinc x8, x1, xzr, ge -; CHECK-NEXT: mul x0, x8, x0 +; CHECK-NEXT: csinc x8, x1, xzr, eq +; CHECK-NEXT: mul x8, x8, x0 +; CHECK-NEXT: csel x0, x1, x8, gt ; CHECK-NEXT: ret entry: %shl = shl i64 %a, %b @@ -103,13 +92,9 @@ ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl x8, x0, x1 ; CHECK-NEXT: cmp x8, #1 // =1 -; CHECK-NEXT: b.le .LBB3_2 -; CHECK-NEXT: // %bb.1: // %return -; CHECK-NEXT: mov x0, x1 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB3_2: // %if.end -; CHECK-NEXT: csinc x8, x1, xzr, ge -; CHECK-NEXT: mul x0, x8, x0 +; CHECK-NEXT: csinc x8, x1, xzr, eq +; CHECK-NEXT: mul x8, x8, x0 +; CHECK-NEXT: csel x0, x1, x8, gt ; CHECK-NEXT: ret entry: %shl = shl i64 %a, %b @@ -131,13 +116,9 @@ ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: lsl x8, x0, x1 ; CHECK-NEXT: cmp x8, #2 // =2 -; CHECK-NEXT: b.le .LBB4_2 -; CHECK-NEXT: // %bb.1: // %return -; CHECK-NEXT: mov x0, x1 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB4_2: // %if.end -; CHECK-NEXT: csinc x8, x1, xzr, ge -; CHECK-NEXT: mul x0, x8, x0 +; CHECK-NEXT: csinc x8, x1, xzr, eq +; CHECK-NEXT: mul x8, x8, x0 +; CHECK-NEXT: csel x0, x1, x8, gt ; CHECK-NEXT: ret entry: %shl = shl i64 %a, %b @@ -158,13 +139,9 @@ ; CHECK-LABEL: ll_a__2: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: cmn x0, #2 // =2 -; CHECK-NEXT: b.le .LBB5_2 -; CHECK-NEXT: // %bb.1: // %return -; CHECK-NEXT: mov x0, x1 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB5_2: // %if.end -; CHECK-NEXT: csinc x8, x1, xzr, ge -; CHECK-NEXT: mul x0, x8, x0 +; CHECK-NEXT: csinc x8, x1, xzr, eq +; CHECK-NEXT: mul x8, x8, x0 +; CHECK-NEXT: csel x0, x1, x8, gt ; CHECK-NEXT: ret entry: %cmp = icmp sgt i64 %a, -2 @@ -183,14 +160,11 @@ define i64 
@ll_a__1(i64 %a, i64 %b) { ; CHECK-LABEL: ll_a__1: ; CHECK: // %bb.0: // %entry -; CHECK-NEXT: tbnz x0, #63, .LBB6_2 -; CHECK-NEXT: // %bb.1: // %return -; CHECK-NEXT: mov x0, x1 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB6_2: // %if.end ; CHECK-NEXT: cmn x0, #1 // =1 -; CHECK-NEXT: csinc x8, x1, xzr, ge -; CHECK-NEXT: mul x0, x8, x0 +; CHECK-NEXT: csinc x8, x1, xzr, eq +; CHECK-NEXT: mul x8, x8, x0 +; CHECK-NEXT: cmp x0, #0 // =0 +; CHECK-NEXT: csel x0, x1, x8, ge ; CHECK-NEXT: ret entry: %cmp = icmp sgt i64 %a, -1 @@ -210,13 +184,9 @@ ; CHECK-LABEL: ll_a_0: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: cmp x0, #0 // =0 -; CHECK-NEXT: b.le .LBB7_2 -; CHECK-NEXT: // %bb.1: // %return -; CHECK-NEXT: mov x0, x1 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB7_2: // %if.end -; CHECK-NEXT: csinc x8, x1, xzr, ge -; CHECK-NEXT: mul x0, x8, x0 +; CHECK-NEXT: csinc x8, x1, xzr, eq +; CHECK-NEXT: mul x8, x8, x0 +; CHECK-NEXT: csel x0, x1, x8, gt ; CHECK-NEXT: ret entry: %cmp = icmp sgt i64 %a, 0 @@ -236,13 +206,9 @@ ; CHECK-LABEL: ll_a_1: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: cmp x0, #1 // =1 -; CHECK-NEXT: b.le .LBB8_2 -; CHECK-NEXT: // %bb.1: // %return -; CHECK-NEXT: mov x0, x1 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB8_2: // %if.end -; CHECK-NEXT: csinc x8, x1, xzr, ge -; CHECK-NEXT: mul x0, x8, x0 +; CHECK-NEXT: csinc x8, x1, xzr, eq +; CHECK-NEXT: mul x8, x8, x0 +; CHECK-NEXT: csel x0, x1, x8, gt ; CHECK-NEXT: ret entry: %cmp = icmp sgt i64 %a, 1 @@ -262,13 +228,9 @@ ; CHECK-LABEL: ll_a_2: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: cmp x0, #2 // =2 -; CHECK-NEXT: b.le .LBB9_2 -; CHECK-NEXT: // %bb.1: // %return -; CHECK-NEXT: mov x0, x1 -; CHECK-NEXT: ret -; CHECK-NEXT: .LBB9_2: // %if.end -; CHECK-NEXT: csinc x8, x1, xzr, ge -; CHECK-NEXT: mul x0, x8, x0 +; CHECK-NEXT: csinc x8, x1, xzr, eq +; CHECK-NEXT: mul x8, x8, x0 +; CHECK-NEXT: csel x0, x1, x8, gt ; CHECK-NEXT: ret entry: %cmp = icmp sgt i64 %a, 2 diff --git a/llvm/test/CodeGen/AArch64/vec-extract-branch.ll b/llvm/test/CodeGen/AArch64/vec-extract-branch.ll --- a/llvm/test/CodeGen/AArch64/vec-extract-branch.ll +++ b/llvm/test/CodeGen/AArch64/vec-extract-branch.ll @@ -14,7 +14,7 @@ ; CHECK-NEXT: mov w8, #42 ; CHECK-NEXT: sdiv w0, w8, w0 ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB0_2: // %false +; CHECK-NEXT: .LBB0_2: ; CHECK-NEXT: mov w0, #88 ; CHECK-NEXT: ret %t1 = fcmp ogt <2 x double> %x, zeroinitializer diff --git a/llvm/test/CodeGen/ARM/ifcvt-callback.ll b/llvm/test/CodeGen/ARM/ifcvt-callback.ll --- a/llvm/test/CodeGen/ARM/ifcvt-callback.ll +++ b/llvm/test/CodeGen/ARM/ifcvt-callback.ll @@ -7,11 +7,12 @@ define i32 @test_ifcvt(i32 %a, i32 %b) #0 { ; CHECK-LABEL: test_ifcvt: -; CHECK: @ %bb.0: +; CHECK: @ %bb.0: @ %common.ret +; CHECK-NEXT: movs r2, #1 ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: ite eq -; CHECK-NEXT: subeq r0, r1, #1 -; CHECK-NEXT: addne r0, r1, #1 +; CHECK-NEXT: it eq +; CHECK-NEXT: moveq.w r2, #-1 +; CHECK-NEXT: adds r0, r1, r2 ; CHECK-NEXT: bx lr %tmp2 = icmp eq i32 %a, 0 br i1 %tmp2, label %cond_false, label %cond_true diff --git a/llvm/test/CodeGen/ARM/ifcvt-iter-indbr.ll b/llvm/test/CodeGen/ARM/ifcvt-iter-indbr.ll --- a/llvm/test/CodeGen/ARM/ifcvt-iter-indbr.ll +++ b/llvm/test/CodeGen/ARM/ifcvt-iter-indbr.ll @@ -43,22 +43,25 @@ ; CHECK-NEXT: mov r2, r8 ; CHECK-NEXT: bl _bar ; CHECK-NEXT: cmp r5, #21 -; CHECK-NEXT: itt eq +; CHECK-NEXT: itttt eq +; CHECK-NEXT: moveq r1, r0 ; CHECK-NEXT: streq.w r5, [r11] -; CHECK-NEXT: moveq pc, r0 +; CHECK-NEXT: movweq r0, #1234 +; CHECK-NEXT: moveq pc, r1 ; CHECK-NEXT: LBB0_1: ; CHECK-NEXT: cmp r4, 
#42 -; CHECK-NEXT: beq LBB0_3 +; CHECK-NEXT: beq LBB0_4 ; CHECK-NEXT: ldr r0, [sp] ; CHECK-NEXT: str r5, [r0] +; CHECK-NEXT: movw r0, #1234 ; CHECK-NEXT: mov pc, r10 ; CHECK-NEXT: Ltmp0: ; CHECK-NEXT: LBB0_3: -; CHECK-NEXT: movw r0, #1234 -; CHECK-NEXT: b LBB0_5 -; CHECK-NEXT: Ltmp1: -; CHECK-NEXT: LBB0_4: ; CHECK-NEXT: movw r0, #4567 +; CHECK-NEXT: b LBB0_5 +; CHECK-NEXT: LBB0_4: +; CHECK-NEXT: movw r0, #1234 +; CHECK-NEXT: Ltmp1: ; CHECK-NEXT: LBB0_5: ; CHECK-NEXT: bl _foo ; CHECK-NEXT: add sp, #4 diff --git a/llvm/test/CodeGen/ARM/ifcvt1.ll b/llvm/test/CodeGen/ARM/ifcvt1.ll --- a/llvm/test/CodeGen/ARM/ifcvt1.ll +++ b/llvm/test/CodeGen/ARM/ifcvt1.ll @@ -4,17 +4,19 @@ define i32 @t1(i32 %a, i32 %b) { ; A8-LABEL: t1: -; A8: @ %bb.0: +; A8: @ %bb.0: @ %common.ret +; A8-NEXT: mov r2, #1 ; A8-NEXT: cmp r0, #0 -; A8-NEXT: subeq r0, r1, #1 -; A8-NEXT: addne r0, r1, #1 +; A8-NEXT: mvneq r2, #0 +; A8-NEXT: add r0, r1, r2 ; A8-NEXT: bx lr ; ; SWIFT-LABEL: t1: -; SWIFT: @ %bb.0: +; SWIFT: @ %bb.0: @ %common.ret +; SWIFT-NEXT: mov r2, #1 ; SWIFT-NEXT: cmp r0, #0 -; SWIFT-NEXT: sub r0, r1, #1 -; SWIFT-NEXT: addne r0, r1, #1 +; SWIFT-NEXT: mvneq r2, #0 +; SWIFT-NEXT: add r0, r1, r2 ; SWIFT-NEXT: bx lr %tmp2 = icmp eq i32 %a, 0 br i1 %tmp2, label %cond_false, label %cond_true diff --git a/llvm/test/CodeGen/ARM/ifcvt3.ll b/llvm/test/CodeGen/ARM/ifcvt3.ll --- a/llvm/test/CodeGen/ARM/ifcvt3.ll +++ b/llvm/test/CodeGen/ARM/ifcvt3.ll @@ -6,8 +6,8 @@ define i32 @t1(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-LABEL: t1: ; CHECK: @ %bb.0: -; CHECK-NEXT: cmp r2, #7 -; CHECK-NEXT: cmpne r2, #1 +; CHECK-NEXT: cmp r2, #1 +; CHECK-NEXT: cmpne r2, #7 ; CHECK-NEXT: addne r0, r1, r0 ; CHECK-NEXT: addeq r0, r0, r1 ; CHECK-NEXT: addeq r0, r0, #1 diff --git a/llvm/test/CodeGen/ARM/ifcvt5.ll b/llvm/test/CodeGen/ARM/ifcvt5.ll --- a/llvm/test/CodeGen/ARM/ifcvt5.ll +++ b/llvm/test/CodeGen/ARM/ifcvt5.ll @@ -34,29 +34,33 @@ define i32 @t1(i32 %a, i32 %b) "frame-pointer"="all" { ; A8-LABEL: t1: ; A8: @ %bb.0: @ %entry -; A8-NEXT: cmp r0, #11 -; A8-NEXT: movlt r0, #1 -; A8-NEXT: bxlt lr +; A8-NEXT: mov r2, r0 +; A8-NEXT: mov r0, #1 +; A8-NEXT: cmp r2, #10 +; A8-NEXT: bxle lr ; A8-NEXT: LBB1_1: @ %cond_true ; A8-NEXT: push {r7, lr} ; A8-NEXT: mov r7, sp ; A8-NEXT: mov r0, r1 ; A8-NEXT: bl _foo ; A8-NEXT: mov r0, #0 -; A8-NEXT: pop {r7, pc} +; A8-NEXT: pop {r7, lr} +; A8-NEXT: bx lr ; ; SWIFT-LABEL: t1: ; SWIFT: @ %bb.0: @ %entry -; SWIFT-NEXT: cmp r0, #11 -; SWIFT-NEXT: movlt r0, #1 -; SWIFT-NEXT: bxlt lr +; SWIFT-NEXT: mov r2, r0 +; SWIFT-NEXT: mov r0, #1 +; SWIFT-NEXT: cmp r2, #10 +; SWIFT-NEXT: bxle lr ; SWIFT-NEXT: LBB1_1: @ %cond_true ; SWIFT-NEXT: push {r7, lr} ; SWIFT-NEXT: mov r7, sp ; SWIFT-NEXT: mov r0, r1 ; SWIFT-NEXT: bl _foo ; SWIFT-NEXT: mov r0, #0 -; SWIFT-NEXT: pop {r7, pc} +; SWIFT-NEXT: pop {r7, lr} +; SWIFT-NEXT: bx lr entry: %tmp1 = icmp sgt i32 %a, 10 ; [#uses=1] br i1 %tmp1, label %cond_true, label %UnifiedReturnBlock diff --git a/llvm/test/CodeGen/ARM/ifcvt6.ll b/llvm/test/CodeGen/ARM/ifcvt6.ll --- a/llvm/test/CodeGen/ARM/ifcvt6.ll +++ b/llvm/test/CodeGen/ARM/ifcvt6.ll @@ -5,9 +5,11 @@ ; CHECK-LABEL: foo: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cmpne r0, #3 -; CHECK-NEXT: bxhi lr -; CHECK-NEXT: LBB0_1: @ %cond_true +; CHECK-NEXT: beq LBB0_2 +; CHECK-NEXT: @ %bb.1: @ %entry +; CHECK-NEXT: cmp r0, #4 +; CHECK-NEXT: bxhs lr +; CHECK-NEXT: LBB0_2: @ %cond_true ; CHECK-NEXT: push {lr} ; CHECK-NEXT: bl _bar ; CHECK-NEXT: pop {lr} diff --git a/llvm/test/CodeGen/ARM/load-global2.ll 
b/llvm/test/CodeGen/ARM/load-global2.ll --- a/llvm/test/CodeGen/ARM/load-global2.ll +++ b/llvm/test/CodeGen/ARM/load-global2.ll @@ -10,18 +10,22 @@ ; LINUX-PIC-NEXT: .save {r4, lr} ; LINUX-PIC-NEXT: push {r4, lr} ; LINUX-PIC-NEXT: ldr r4, .LCPI0_0 +; LINUX-PIC-NEXT: mov r0, #0 ; LINUX-PIC-NEXT: .LPC0_0: ; LINUX-PIC-NEXT: ldr r4, [pc, r4] -; LINUX-PIC-NEXT: ldrb r0, [r4] -; LINUX-PIC-NEXT: cmp r0, #0 -; LINUX-PIC-NEXT: movne r0, #0 -; LINUX-PIC-NEXT: popne {r4, pc} -; LINUX-PIC-NEXT: .LBB0_1: @ %bb1 +; LINUX-PIC-NEXT: ldrb r1, [r4] +; LINUX-PIC-NEXT: cmp r1, #0 +; LINUX-PIC-NEXT: beq .LBB0_2 +; LINUX-PIC-NEXT: @ %bb.1: @ %common.ret +; LINUX-PIC-NEXT: sxtb r0, r0 +; LINUX-PIC-NEXT: pop {r4, pc} +; LINUX-PIC-NEXT: .LBB0_2: @ %bb1 ; LINUX-PIC-NEXT: bl bar -; LINUX-PIC-NEXT: ldrsb r0, [r4] +; LINUX-PIC-NEXT: ldrb r0, [r4] +; LINUX-PIC-NEXT: sxtb r0, r0 ; LINUX-PIC-NEXT: pop {r4, pc} ; LINUX-PIC-NEXT: .p2align 2 -; LINUX-PIC-NEXT: @ %bb.2: +; LINUX-PIC-NEXT: @ %bb.3: ; LINUX-PIC-NEXT: .LCPI0_0: ; LINUX-PIC-NEXT: .Ltmp0: ; LINUX-PIC-NEXT: .long x(GOT_PREL)-((.LPC0_0+8)-.Ltmp0) diff --git a/llvm/test/CodeGen/ARM/smml.ll b/llvm/test/CodeGen/ARM/smml.ll --- a/llvm/test/CodeGen/ARM/smml.ll +++ b/llvm/test/CodeGen/ARM/smml.ll @@ -1,13 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=arm-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-V4 -; RUN: llc -mtriple=armv6-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-V6 -; RUN: llc -mtriple=armv7-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-V6 +; RUN: llc -mtriple=armv6-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-V6V7,CHECK-V6 +; RUN: llc -mtriple=armv7-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-V6V7,CHECK-V7 ; RUN: llc -mtriple=thumb-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-THUMB ; RUN: llc -mtriple=thumbv6-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-THUMBV6 ; RUN: llc -mtriple=thumbv6t2-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-THUMBV6T2 -; RUN: llc -mtriple=thumbv7-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-THUMBV6T2 -; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-V4-THUMBV7M -; RUN: llc -mtriple=thumbv7em-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-THUMBV6T2 +; RUN: llc -mtriple=thumbv7-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-THUMBV7 +; RUN: llc -mtriple=thumbv7m-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-THUMBV7M +; RUN: llc -mtriple=thumbv7em-eabi %s -o - | FileCheck %s --check-prefixes=CHECK-THUMBV7EM ; Next test would previously trigger an assertion responsible for verification of ; call site info state. 
@@ -22,11 +22,11 @@ ; CHECK-V4-NEXT: sub r0, r0, r12 ; CHECK-V4-NEXT: mov pc, lr ; -; CHECK-V6-LABEL: Test0: -; CHECK-V6: @ %bb.0: @ %entry -; CHECK-V6-NEXT: smmul r1, r2, r1 -; CHECK-V6-NEXT: sub r0, r0, r1 -; CHECK-V6-NEXT: bx lr +; CHECK-V6V7-LABEL: Test0: +; CHECK-V6V7: @ %bb.0: @ %entry +; CHECK-V6V7-NEXT: smmul r1, r2, r1 +; CHECK-V6V7-NEXT: sub r0, r0, r1 +; CHECK-V6V7-NEXT: bx lr ; ; CHECK-THUMB-LABEL: Test0: ; CHECK-THUMB: @ %bb.0: @ %entry @@ -64,6 +64,23 @@ ; CHECK-THUMBV6T2-NEXT: subs r0, r0, r1 ; CHECK-THUMBV6T2-NEXT: bx lr ; +; CHECK-THUMBV7-LABEL: Test0: +; CHECK-THUMBV7: @ %bb.0: @ %entry +; CHECK-THUMBV7-NEXT: smmul r1, r2, r1 +; CHECK-THUMBV7-NEXT: subs r0, r0, r1 +; CHECK-THUMBV7-NEXT: bx lr +; +; CHECK-THUMBV7M-LABEL: Test0: +; CHECK-THUMBV7M: @ %bb.0: @ %entry +; CHECK-THUMBV7M-NEXT: smull r1, r2, r2, r1 +; CHECK-THUMBV7M-NEXT: subs r0, r0, r2 +; CHECK-THUMBV7M-NEXT: bx lr +; +; CHECK-THUMBV7EM-LABEL: Test0: +; CHECK-THUMBV7EM: @ %bb.0: @ %entry +; CHECK-THUMBV7EM-NEXT: smmul r1, r2, r1 +; CHECK-THUMBV7EM-NEXT: subs r0, r0, r1 +; CHECK-THUMBV7EM-NEXT: bx lr ; CHECK-V4-THUMBV7M-LABEL: Test0: ; CHECK-V4-THUMBV7M: @ %bb.0: @ %entry ; CHECK-V4-THUMBV7M-NEXT: smull r1, r2, r2, r1 @@ -88,10 +105,10 @@ ; CHECK-V4-NEXT: sbc r0, r0, r12 ; CHECK-V4-NEXT: mov pc, lr ; -; CHECK-V6-LABEL: Test1: -; CHECK-V6: @ %bb.0: @ %entry -; CHECK-V6-NEXT: smmls r0, r2, r1, r0 -; CHECK-V6-NEXT: bx lr +; CHECK-V6V7-LABEL: Test1: +; CHECK-V6V7: @ %bb.0: @ %entry +; CHECK-V6V7-NEXT: smmls r0, r2, r1, r0 +; CHECK-V6V7-NEXT: bx lr ; ; CHECK-THUMB-LABEL: Test1: ; CHECK-THUMB: @ %bb.0: @ %entry @@ -132,6 +149,22 @@ ; CHECK-THUMBV6T2-NEXT: smmls r0, r2, r1, r0 ; CHECK-THUMBV6T2-NEXT: bx lr ; +; CHECK-THUMBV7-LABEL: Test1: +; CHECK-THUMBV7: @ %bb.0: @ %entry +; CHECK-THUMBV7-NEXT: smmls r0, r2, r1, r0 +; CHECK-THUMBV7-NEXT: bx lr +; +; CHECK-THUMBV7M-LABEL: Test1: +; CHECK-THUMBV7M: @ %bb.0: @ %entry +; CHECK-THUMBV7M-NEXT: smull r1, r2, r2, r1 +; CHECK-THUMBV7M-NEXT: rsbs r1, r1, #0 +; CHECK-THUMBV7M-NEXT: sbcs r0, r2 +; CHECK-THUMBV7M-NEXT: bx lr +; +; CHECK-THUMBV7EM-LABEL: Test1: +; CHECK-THUMBV7EM: @ %bb.0: @ %entry +; CHECK-THUMBV7EM-NEXT: smmls r0, r2, r1, r0 +; CHECK-THUMBV7EM-NEXT: bx lr ; CHECK-V4-THUMBV7M-LABEL: Test1: ; CHECK-V4-THUMBV7M: @ %bb.0: @ %entry ; CHECK-V4-THUMBV7M-NEXT: smull r1, r2, r2, r1 @@ -166,21 +199,28 @@ ; CHECK-V4-NEXT: mov pc, lr ; ; CHECK-V6-LABEL: test_used_flags: -; CHECK-V6: @ %bb.0: +; CHECK-V6: @ %bb.0: @ %common.ret ; CHECK-V6-NEXT: .save {r11, lr} ; CHECK-V6-NEXT: push {r11, lr} -; CHECK-V6-NEXT: smull r0, r1, r0, r1 -; CHECK-V6-NEXT: rsbs r0, r0, #0 -; CHECK-V6-NEXT: rscs r0, r1, #0 -; CHECK-V6-NEXT: bge .LBB2_2 -; CHECK-V6-NEXT: @ %bb.1: @ %false +; CHECK-V6-NEXT: smull r1, r2, r0, r1 ; CHECK-V6-NEXT: mov r0, #56 +; CHECK-V6-NEXT: subs r1, r1, #1 +; CHECK-V6-NEXT: sbcs r1, r2, #0 +; CHECK-V6-NEXT: movlt r0, #42 ; CHECK-V6-NEXT: bl opaque ; CHECK-V6-NEXT: pop {r11, pc} -; CHECK-V6-NEXT: .LBB2_2: @ %true -; CHECK-V6-NEXT: mov r0, #42 -; CHECK-V6-NEXT: bl opaque -; CHECK-V6-NEXT: pop {r11, pc} +; +; CHECK-V7-LABEL: test_used_flags: +; CHECK-V7: @ %bb.0: @ %common.ret +; CHECK-V7-NEXT: .save {r11, lr} +; CHECK-V7-NEXT: push {r11, lr} +; CHECK-V7-NEXT: smull r1, r2, r0, r1 +; CHECK-V7-NEXT: mov r0, #56 +; CHECK-V7-NEXT: subs r1, r1, #1 +; CHECK-V7-NEXT: sbcs r1, r2, #0 +; CHECK-V7-NEXT: movwlt r0, #42 +; CHECK-V7-NEXT: bl opaque +; CHECK-V7-NEXT: pop {r11, pc} ; ; CHECK-THUMB-LABEL: test_used_flags: ; CHECK-THUMB: @ %bb.0: @@ -240,6 +280,44 @@ ; 
CHECK-THUMBV6T2-NEXT: bl opaque ; CHECK-THUMBV6T2-NEXT: pop {r7, pc} ; +; CHECK-THUMBV7-LABEL: test_used_flags: +; CHECK-THUMBV7: @ %bb.0: @ %common.ret +; CHECK-THUMBV7-NEXT: .save {r7, lr} +; CHECK-THUMBV7-NEXT: push {r7, lr} +; CHECK-THUMBV7-NEXT: smull r1, r2, r0, r1 +; CHECK-THUMBV7-NEXT: movs r0, #56 +; CHECK-THUMBV7-NEXT: subs r1, #1 +; CHECK-THUMBV7-NEXT: sbcs r1, r2, #0 +; CHECK-THUMBV7-NEXT: it lt +; CHECK-THUMBV7-NEXT: movlt r0, #42 +; CHECK-THUMBV7-NEXT: bl opaque +; CHECK-THUMBV7-NEXT: pop {r7, pc} +; +; CHECK-THUMBV7M-LABEL: test_used_flags: +; CHECK-THUMBV7M: @ %bb.0: @ %common.ret +; CHECK-THUMBV7M-NEXT: .save {r7, lr} +; CHECK-THUMBV7M-NEXT: push {r7, lr} +; CHECK-THUMBV7M-NEXT: smull r1, r2, r0, r1 +; CHECK-THUMBV7M-NEXT: movs r0, #56 +; CHECK-THUMBV7M-NEXT: subs r1, #1 +; CHECK-THUMBV7M-NEXT: sbcs r1, r2, #0 +; CHECK-THUMBV7M-NEXT: it lt +; CHECK-THUMBV7M-NEXT: movlt r0, #42 +; CHECK-THUMBV7M-NEXT: bl opaque +; CHECK-THUMBV7M-NEXT: pop {r7, pc} +; +; CHECK-THUMBV7EM-LABEL: test_used_flags: +; CHECK-THUMBV7EM: @ %bb.0: @ %common.ret +; CHECK-THUMBV7EM-NEXT: .save {r7, lr} +; CHECK-THUMBV7EM-NEXT: push {r7, lr} +; CHECK-THUMBV7EM-NEXT: smull r1, r2, r0, r1 +; CHECK-THUMBV7EM-NEXT: movs r0, #56 +; CHECK-THUMBV7EM-NEXT: subs r1, #1 +; CHECK-THUMBV7EM-NEXT: sbcs r1, r2, #0 +; CHECK-THUMBV7EM-NEXT: it lt +; CHECK-THUMBV7EM-NEXT: movlt r0, #42 +; CHECK-THUMBV7EM-NEXT: bl opaque +; CHECK-THUMBV7EM-NEXT: pop {r7, pc} ; CHECK-V4-THUMBV7M-LABEL: test_used_flags: ; CHECK-V4-THUMBV7M: @ %bb.0: ; CHECK-V4-THUMBV7M-NEXT: .save {r7, lr} diff --git a/llvm/test/CodeGen/ARM/speculation-hardening-sls.ll b/llvm/test/CodeGen/ARM/speculation-hardening-sls.ll --- a/llvm/test/CodeGen/ARM/speculation-hardening-sls.ll +++ b/llvm/test/CodeGen/ARM/speculation-hardening-sls.ll @@ -26,9 +26,9 @@ define dso_local i32 @double_return(i32 %a, i32 %b) local_unnamed_addr { ; NOHARDENARM-LABEL: double_return: ; NOHARDENARM: @ %bb.0: @ %entry -; NOHARDENARM-NEXT: cmp r0, #1 -; NOHARDENARM-NEXT: mulge r0, r1, r0 -; NOHARDENARM-NEXT: bxge lr +; NOHARDENARM-NEXT: cmp r0, #0 +; NOHARDENARM-NEXT: mulgt r0, r1, r0 +; NOHARDENARM-NEXT: bxgt lr ; NOHARDENARM-NEXT: .LBB0_1: @ %if.else ; NOHARDENARM-NEXT: sdiv r1, r0, r1 ; NOHARDENARM-NEXT: sdiv r1, r0, r1 @@ -37,8 +37,8 @@ ; ; NOHARDENTHUMB-LABEL: double_return: ; NOHARDENTHUMB: @ %bb.0: @ %entry -; NOHARDENTHUMB-NEXT: cmp r0, #1 -; NOHARDENTHUMB-NEXT: blt .LBB0_2 +; NOHARDENTHUMB-NEXT: cmp r0, #0 +; NOHARDENTHUMB-NEXT: ble .LBB0_2 ; NOHARDENTHUMB-NEXT: @ %bb.1: @ %if.then ; NOHARDENTHUMB-NEXT: muls r0, r1, r0 ; NOHARDENTHUMB-NEXT: bx lr diff --git a/llvm/test/CodeGen/ARM/switch-minsize.ll b/llvm/test/CodeGen/ARM/switch-minsize.ll --- a/llvm/test/CodeGen/ARM/switch-minsize.ll +++ b/llvm/test/CodeGen/ARM/switch-minsize.ll @@ -8,31 +8,29 @@ define void @f(i32 %val) optsize minsize { ; CHECK-LABEL: f: ; CHECK: @ %bb.0: -; CHECK-NEXT: str lr, [sp, #-4]! 
-; CHECK-NEXT: movw r1, #1154 -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: beq LBB0_5 +; CHECK-NEXT: mov r1, r0 +; CHECK-NEXT: movs r0, #1 +; CHECK-NEXT: cbz r1, LBB0_6 ; CHECK-NEXT: @ %bb.1: -; CHECK-NEXT: cmp r0, #9 +; CHECK-NEXT: movw r2, #1154 +; CHECK-NEXT: cmp r1, r2 ; CHECK-NEXT: beq LBB0_4 ; CHECK-NEXT: @ %bb.2: -; CHECK-NEXT: movw r1, #994 -; CHECK-NEXT: cmp r0, r1 -; CHECK-NEXT: beq LBB0_6 +; CHECK-NEXT: movw r2, #994 +; CHECK-NEXT: cmp r1, r2 +; CHECK-NEXT: beq LBB0_5 ; CHECK-NEXT: @ %bb.3: -; CHECK-NEXT: cbnz r0, LBB0_7 -; CHECK-NEXT: LBB0_4: @ %two -; CHECK-NEXT: movs r0, #1 -; CHECK-NEXT: b LBB0_8 -; CHECK-NEXT: LBB0_5: @ %four +; CHECK-NEXT: cmp r1, #9 +; CHECK-NEXT: it ne +; CHECK-NEXT: movne r0, #11 +; CHECK-NEXT: b LBB0_6 +; CHECK-NEXT: LBB0_4: @ %four ; CHECK-NEXT: movs r0, #87 -; CHECK-NEXT: b LBB0_8 -; CHECK-NEXT: LBB0_6: @ %three +; CHECK-NEXT: b LBB0_6 +; CHECK-NEXT: LBB0_5: @ %three ; CHECK-NEXT: movs r0, #78 -; CHECK-NEXT: b LBB0_8 -; CHECK-NEXT: LBB0_7: @ %def -; CHECK-NEXT: movs r0, #11 -; CHECK-NEXT: LBB0_8: @ %two +; CHECK-NEXT: LBB0_6: @ %common.ret +; CHECK-NEXT: str lr, [sp, #-4]! ; CHECK-NEXT: bl _g ; CHECK-NEXT: ldr lr, [sp], #4 ; CHECK-NEXT: bx lr diff --git a/llvm/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll b/llvm/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll --- a/llvm/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll +++ b/llvm/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll @@ -6,16 +6,14 @@ ; CHECK: .cfi_startproc ; CHECK-NEXT: // %bb.0: // %b0 ; CHECK-NEXT: { -; CHECK-NEXT: p0 = cmp.gt(r1,r0) -; CHECK-NEXT: if (p0.new) r0 = #0 -; CHECK-NEXT: if (p0.new) jumpr:nt r31 -; CHECK-NEXT: } -; CHECK-NEXT: .LBB0_1: // %b2 -; CHECK-NEXT: { ; CHECK-NEXT: p0 = cmp.gt(r1,#99) +; CHECK-NEXT: p1 = cmp.gt(r1,r0) ; CHECK-NEXT: } ; CHECK-NEXT: { ; CHECK-NEXT: r0 = mux(p0,##321,#123) +; CHECK-NEXT: } +; CHECK-NEXT: { +; CHECK-NEXT: if (p1) r0 = #0 ; CHECK-NEXT: jumpr r31 ; CHECK-NEXT: } b0: diff --git a/llvm/test/CodeGen/Hexagon/noFalignAfterCallAtO2.ll b/llvm/test/CodeGen/Hexagon/noFalignAfterCallAtO2.ll --- a/llvm/test/CodeGen/Hexagon/noFalignAfterCallAtO2.ll +++ b/llvm/test/CodeGen/Hexagon/noFalignAfterCallAtO2.ll @@ -30,8 +30,11 @@ ; CHECK-NEXT: { ; CHECK-NEXT: r0 = add(r16,r0) ; CHECK-NEXT: r17:16 = memd(r29+#0) -; CHECK-NEXT: dealloc_return +; CHECK-NEXT: deallocframe ; CHECK-NEXT: } // 8-byte Folded Reload +; CHECK-NEXT: { +; CHECK-NEXT: jumpr r31 +; CHECK-NEXT: } b0: %v0 = icmp eq i32 %a0, 0 br i1 %v0, label %b1, label %b2 diff --git a/llvm/test/CodeGen/Thumb2/thumb2-ifcvt1.ll b/llvm/test/CodeGen/Thumb2/thumb2-ifcvt1.ll --- a/llvm/test/CodeGen/Thumb2/thumb2-ifcvt1.ll +++ b/llvm/test/CodeGen/Thumb2/thumb2-ifcvt1.ll @@ -7,9 +7,9 @@ define i32 @t1(i32 %a, i32 %b, i32 %c, i32 %d) nounwind { ; ALL-LABEL: t1: ; ALL: @ %bb.0: -; ALL-NEXT: cmp r2, #7 +; ALL-NEXT: cmp r2, #1 ; ALL-NEXT: ittee ne -; ALL-NEXT: cmpne r2, #1 +; ALL-NEXT: cmpne r2, #7 ; ALL-NEXT: addne r0, r1 ; ALL-NEXT: addeq r0, r1 ; ALL-NEXT: addeq r0, #1 @@ -242,9 +242,9 @@ define void @t3(i32 %a, i32 %b) nounwind { ; V01-LABEL: t3: ; V01: @ %bb.0: @ %entry -; V01-NEXT: cmp r0, #11 -; V01-NEXT: it lt -; V01-NEXT: bxlt lr +; V01-NEXT: cmp r0, #10 +; V01-NEXT: it le +; V01-NEXT: bxle lr ; V01-NEXT: LBB4_1: @ %cond_true ; V01-NEXT: str lr, [sp, #-4]! 
; V01-NEXT: mov r0, r1 @@ -254,14 +254,15 @@ ; ; V23-LABEL: t3: ; V23: @ %bb.0: @ %entry -; V23-NEXT: cmp r0, #11 -; V23-NEXT: it lt -; V23-NEXT: bxlt lr +; V23-NEXT: cmp r0, #10 +; V23-NEXT: it le +; V23-NEXT: bxle lr ; V23-NEXT: .LBB4_1: @ %cond_true ; V23-NEXT: push {r7, lr} ; V23-NEXT: mov r0, r1 ; V23-NEXT: bl foo -; V23-NEXT: pop {r7, pc} +; V23-NEXT: pop.w {r7, lr} +; V23-NEXT: bx lr entry: %tmp1 = icmp sgt i32 %a, 10 ; [#uses=1] br i1 %tmp1, label %cond_true, label %UnifiedReturnBlock diff --git a/llvm/test/CodeGen/Thumb2/tpsoft.ll b/llvm/test/CodeGen/Thumb2/tpsoft.ll --- a/llvm/test/CodeGen/Thumb2/tpsoft.ll +++ b/llvm/test/CodeGen/Thumb2/tpsoft.ll @@ -26,23 +26,23 @@ ; ELFASM-NEXT: ldr r1, [r0] ; ELFASM-NEXT: bl __aeabi_read_tp ; ELFASM-NEXT: ldr r0, [r0, r1] -; ELFASM-NEXT: cmp r0, #13 +; ELFASM-NEXT: cmp r0, #12 ; ELFASM-NEXT: beq .LBB0_3 ; ELFASM-NEXT: @ %bb.1: @ %entry -; ELFASM-NEXT: cmp r0, #12 +; ELFASM-NEXT: cmp r0, #13 ; ELFASM-NEXT: itt ne ; ELFASM-NEXT: movne.w r0, #-1 ; ELFASM-NEXT: popne {r7, pc} -; ELFASM-NEXT: .LBB0_2: @ %bb -; ELFASM-NEXT: movw r0, :lower16:a -; ELFASM-NEXT: movt r0, :upper16:a -; ELFASM-NEXT: pop.w {r7, lr} -; ELFASM-NEXT: b foo -; ELFASM-NEXT: .LBB0_3: @ %bb1 +; ELFASM-NEXT: .LBB0_2: @ %bb1 ; ELFASM-NEXT: movw r0, :lower16:b ; ELFASM-NEXT: movt r0, :upper16:b ; ELFASM-NEXT: pop.w {r7, lr} ; ELFASM-NEXT: b bar +; ELFASM-NEXT: .LBB0_3: @ %bb +; ELFASM-NEXT: movw r0, :lower16:a +; ELFASM-NEXT: movt r0, :upper16:a +; ELFASM-NEXT: pop.w {r7, lr} +; ELFASM-NEXT: b foo ; ELFASM-NEXT: .p2align 2 ; ELFASM-NEXT: @ %bb.4: ; ELFASM-NEXT: .LCPI0_0: diff --git a/llvm/test/CodeGen/Thumb2/v8_IT_4.ll b/llvm/test/CodeGen/Thumb2/v8_IT_4.ll --- a/llvm/test/CodeGen/Thumb2/v8_IT_4.ll +++ b/llvm/test/CodeGen/Thumb2/v8_IT_4.ll @@ -16,62 +16,60 @@ ; P01: @ %bb.0: @ %entry ; P01-NEXT: .save {r4, r5, r6, r7, r8, lr} ; P01-NEXT: push.w {r4, r5, r6, r7, r8, lr} -; P01-NEXT: mov r6, r1 -; P01-NEXT: mov r7, r0 -; P01-NEXT: bl _ZNKSs4sizeEv -; P01-NEXT: mov r8, r0 -; P01-NEXT: mov r0, r6 -; P01-NEXT: bl _ZNKSs4sizeEv -; P01-NEXT: mov r4, r8 -; P01-NEXT: cmp r0, r8 +; P01-NEXT: mov r8, r1 ; P01-NEXT: mov r5, r0 -; P01-NEXT: it lo -; P01-NEXT: movlo r4, r0 -; P01-NEXT: mov r0, r7 -; P01-NEXT: bl _ZNKSs7_M_dataEv +; P01-NEXT: bl _ZNKSs4sizeEv +; P01-NEXT: mov r4, r0 +; P01-NEXT: mov r0, r8 +; P01-NEXT: bl _ZNKSs4sizeEv +; P01-NEXT: mov r6, r4 +; P01-NEXT: cmp r4, r0 ; P01-NEXT: mov r7, r0 -; P01-NEXT: mov r0, r6 +; P01-NEXT: it hi +; P01-NEXT: movhi r6, r0 +; P01-NEXT: mov r0, r5 +; P01-NEXT: bl _ZNKSs7_M_dataEv +; P01-NEXT: mov r5, r0 +; P01-NEXT: mov r0, r8 ; P01-NEXT: bl _ZNKSs4dataEv ; P01-NEXT: mov r1, r0 -; P01-NEXT: mov r0, r7 -; P01-NEXT: mov r2, r4 +; P01-NEXT: mov r0, r5 +; P01-NEXT: mov r2, r6 ; P01-NEXT: bl memcmp -; P01-NEXT: cbz r0, .LBB0_2 -; P01-NEXT: @ %bb.1: @ %bb1 -; P01-NEXT: pop.w {r4, r5, r6, r7, r8, pc} -; P01-NEXT: .LBB0_2: @ %bb -; P01-NEXT: sub.w r0, r8, r5 +; P01-NEXT: subs r1, r4, r7 +; P01-NEXT: cmp r0, #0 +; P01-NEXT: it eq +; P01-NEXT: moveq r0, r1 ; P01-NEXT: pop.w {r4, r5, r6, r7, r8, pc} ; ; P23-LABEL: _ZNKSs7compareERKSs: ; P23: @ %bb.0: @ %entry ; P23-NEXT: .save {r4, r5, r6, r7, r8, lr} ; P23-NEXT: push.w {r4, r5, r6, r7, r8, lr} -; P23-NEXT: mov r7, r1 -; P23-NEXT: mov r5, r0 +; P23-NEXT: mov r8, r1 +; P23-NEXT: mov r7, r0 ; P23-NEXT: bl _ZNKSs4sizeEv -; P23-NEXT: mov r8, r0 -; P23-NEXT: mov r0, r7 -; P23-NEXT: bl _ZNKSs4sizeEv -; P23-NEXT: mov r4, r8 -; P23-NEXT: cmp r0, r8 ; P23-NEXT: mov r6, r0 -; P23-NEXT: it lo -; P23-NEXT: movlo r4, r0 
-; P23-NEXT: mov r0, r5 -; P23-NEXT: bl _ZNKSs7_M_dataEv +; P23-NEXT: mov r0, r8 +; P23-NEXT: bl _ZNKSs4sizeEv +; P23-NEXT: mov r4, r6 +; P23-NEXT: cmp r6, r0 ; P23-NEXT: mov r5, r0 +; P23-NEXT: it hi +; P23-NEXT: movhi r4, r0 ; P23-NEXT: mov r0, r7 +; P23-NEXT: bl _ZNKSs7_M_dataEv +; P23-NEXT: mov r7, r0 +; P23-NEXT: mov r0, r8 ; P23-NEXT: bl _ZNKSs4dataEv ; P23-NEXT: mov r1, r0 -; P23-NEXT: mov r0, r5 +; P23-NEXT: mov r0, r7 ; P23-NEXT: mov r2, r4 ; P23-NEXT: bl memcmp -; P23-NEXT: cbz r0, .LBB0_2 -; P23-NEXT: @ %bb.1: @ %bb1 -; P23-NEXT: pop.w {r4, r5, r6, r7, r8, pc} -; P23-NEXT: .LBB0_2: @ %bb -; P23-NEXT: sub.w r0, r8, r6 +; P23-NEXT: subs r1, r6, r5 +; P23-NEXT: cmp r0, #0 +; P23-NEXT: it eq +; P23-NEXT: moveq r0, r1 ; P23-NEXT: pop.w {r4, r5, r6, r7, r8, pc} entry: %0 = tail call arm_aapcs_vfpcc i32 @_ZNKSs4sizeEv(%"struct.std::basic_string,std::allocator >"* %this) ; [#uses=3] diff --git a/llvm/test/Transforms/LoopUnroll/ARM/upperbound.ll b/llvm/test/Transforms/LoopUnroll/ARM/upperbound.ll --- a/llvm/test/Transforms/LoopUnroll/ARM/upperbound.ll +++ b/llvm/test/Transforms/LoopUnroll/ARM/upperbound.ll @@ -75,13 +75,10 @@ ; CHECK-NEXT: entry: ; CHECK-NEXT: [[L86_OFF:%.*]] = add i32 [[L86:%.*]], -1 ; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i32 [[L86_OFF]], 24 -; CHECK-NEXT: br i1 [[SWITCH]], label [[COMMON_RET:%.*]], label [[FOR_INC_I_3_I_5:%.*]] -; CHECK: common.ret: -; CHECK-NEXT: ret i32 0 -; CHECK: for.inc.i.3.i.5: ; CHECK-NEXT: [[DOTNOT30:%.*]] = icmp ne i32 [[L86]], 25 ; CHECK-NEXT: [[SPEC_SELECT24:%.*]] = zext i1 [[DOTNOT30]] to i32 -; CHECK-NEXT: ret i32 [[SPEC_SELECT24]] +; CHECK-NEXT: [[COMMON_RET31_OP:%.*]] = select i1 [[SWITCH]], i32 0, i32 [[SPEC_SELECT24]] +; CHECK-NEXT: ret i32 [[COMMON_RET31_OP]] ; entry: br label %for.body.i.i diff --git a/llvm/test/Transforms/PGOProfile/chr.ll b/llvm/test/Transforms/PGOProfile/chr.ll --- a/llvm/test/Transforms/PGOProfile/chr.ll +++ b/llvm/test/Transforms/PGOProfile/chr.ll @@ -390,9 +390,12 @@ ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[TMP0]], 3 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 3 ; CHECK-NEXT: br i1 [[TMP2]], label [[ENTRY_SPLIT:%.*]], label [[ENTRY_SPLIT_NONCHR:%.*]], !prof [[PROF15]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[TMP3:%.*]], [[ENTRY_SPLIT]] ], [ [[SUM2_NONCHR:%.*]], [[ENTRY_SPLIT_NONCHR]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: entry.split: -; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[SUM0:%.*]], 85 -; CHECK-NEXT: ret i32 [[TMP3]] +; CHECK-NEXT: [[TMP3]] = add i32 [[SUM0:%.*]], 85 +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: entry.split.nonchr: ; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[SUM0]], 42 ; CHECK-NEXT: [[TMP5:%.*]] = and i32 [[TMP0]], 1 @@ -401,8 +404,8 @@ ; CHECK-NEXT: [[TMP6:%.*]] = and i32 [[TMP0]], 2 ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[TMP6]], 0 ; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[SUM1_NONCHR]], 43 -; CHECK-NEXT: [[SUM2_NONCHR:%.*]] = select i1 [[TMP7]], i32 [[SUM1_NONCHR]], i32 [[TMP8]], !prof [[PROF16]] -; CHECK-NEXT: ret i32 [[SUM2_NONCHR]] +; CHECK-NEXT: [[SUM2_NONCHR]] = select i1 [[TMP7]], i32 [[SUM1_NONCHR]], i32 [[TMP8]], !prof [[PROF16]] +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %0 = load i32, i32* %i @@ -2009,14 +2012,16 @@ ; CHECK-NEXT: [[V2:%.*]] = add i64 [[REASS_ADD]], 3 ; CHECK-NEXT: [[C1:%.*]] = icmp slt i64 [[V2]], 100 ; CHECK-NEXT: br i1 [[C1]], label [[BB0_SPLIT:%.*]], label [[BB0_SPLIT_NONCHR:%.*]], !prof [[PROF15]] +; CHECK: common.ret: +; CHECK-NEXT: ret i64 99 ; CHECK: bb0.split: ; CHECK-NEXT: [[V299:%.*]] = mul i64 
[[V2]], 7860086430977039991 ; CHECK-NEXT: store i64 [[V299]], i64* [[J:%.*]], align 4 -; CHECK-NEXT: ret i64 99 +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: bb0.split.nonchr: ; CHECK-NEXT: [[V299_NONCHR:%.*]] = mul i64 [[V2]], 7860086430977039991 ; CHECK-NEXT: store i64 [[V299_NONCHR]], i64* [[J]], align 4 -; CHECK-NEXT: ret i64 99 +; CHECK-NEXT: br label [[COMMON_RET]] ; bb0: %v1 = add i64 %v0, 3 diff --git a/llvm/test/Transforms/PhaseOrdering/X86/simplifycfg-late.ll b/llvm/test/Transforms/PhaseOrdering/X86/simplifycfg-late.ll --- a/llvm/test/Transforms/PhaseOrdering/X86/simplifycfg-late.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/simplifycfg-late.ll @@ -11,14 +11,15 @@ ; CHECK-NEXT: entry: ; CHECK-NEXT: [[SWITCH_TABLEIDX:%.*]] = add i32 [[C:%.*]], -42 ; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[SWITCH_TABLEIDX]], 7 -; CHECK-NEXT: br i1 [[TMP0]], label [[SWITCH_LOOKUP:%.*]], label [[RETURN:%.*]] +; CHECK-NEXT: br i1 [[TMP0]], label [[SWITCH_LOOKUP:%.*]], label [[COMMON_RET:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[SWITCH_LOAD:%.*]], [[SWITCH_LOOKUP]] ], [ 15, [[ENTRY:%.*]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: switch.lookup: ; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[SWITCH_TABLEIDX]] to i64 ; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [7 x i32], [7 x i32]* @switch.table.f, i64 0, i64 [[TMP1]] -; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]], align 4 -; CHECK-NEXT: ret i32 [[SWITCH_LOAD]] -; CHECK: return: -; CHECK-NEXT: ret i32 15 +; CHECK-NEXT: [[SWITCH_LOAD]] = load i32, i32* [[SWITCH_GEP]], align 4 +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: switch i32 %c, label %sw.default [ diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions.ll b/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions.ll --- a/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions.ll @@ -292,14 +292,15 @@ ; CHECK-NEXT: [[CMP:%.*]] = fcmp olt double [[DIV]], 0x3EB0C6F7A0B5ED8D ; CHECK-NEXT: [[CMP4:%.*]] = fcmp olt double [[DIV3]], 0x3EB0C6F7A0B5ED8D ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[CMP]], i1 [[CMP4]], i1 false -; CHECK-NEXT: br i1 [[OR_COND]], label [[CLEANUP:%.*]], label [[LOR_LHS_FALSE:%.*]] +; CHECK-NEXT: br i1 [[OR_COND]], label [[COMMON_RET:%.*]], label [[LOR_LHS_FALSE:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i1 [ [[OR_COND1:%.*]], [[LOR_LHS_FALSE]] ], [ false, [[ENTRY:%.*]] ] +; CHECK-NEXT: ret i1 [[COMMON_RET_OP]] ; CHECK: lor.lhs.false: ; CHECK-NEXT: [[CMP5:%.*]] = fcmp ule double [[DIV]], 1.000000e+00 ; CHECK-NEXT: [[CMP7:%.*]] = fcmp ule double [[DIV3]], 1.000000e+00 -; CHECK-NEXT: [[OR_COND1:%.*]] = select i1 [[CMP5]], i1 true, i1 [[CMP7]] -; CHECK-NEXT: ret i1 [[OR_COND1]] -; CHECK: cleanup: -; CHECK-NEXT: ret i1 false +; CHECK-NEXT: [[OR_COND1]] = select i1 [[CMP5]], i1 true, i1 [[CMP7]] +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %fneg = fneg double %b diff --git a/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll b/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll --- a/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll +++ b/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll @@ -16,31 +16,28 @@ define void @caller1(i1 %c, i64* align 1 %ptr) { ; ASSUMPTIONS-OFF-LABEL: @caller1( -; ASSUMPTIONS-OFF-NEXT: br i1 [[C:%.*]], label [[TRUE2:%.*]], label [[FALSE2:%.*]] -; ASSUMPTIONS-OFF: true2: +; 
ASSUMPTIONS-OFF-NEXT: br i1 [[C:%.*]], label [[COMMON_RET:%.*]], label [[FALSE2:%.*]] +; ASSUMPTIONS-OFF: common.ret: +; ASSUMPTIONS-OFF-NEXT: [[DOTSINK:%.*]] = phi i64 [ 3, [[FALSE2]] ], [ 2, [[TMP0:%.*]] ] ; ASSUMPTIONS-OFF-NEXT: store volatile i64 0, i64* [[PTR:%.*]], align 8 ; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4 ; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4 ; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4 ; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4 ; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4 -; ASSUMPTIONS-OFF-NEXT: store volatile i64 2, i64* [[PTR]], align 4 +; ASSUMPTIONS-OFF-NEXT: store volatile i64 [[DOTSINK]], i64* [[PTR]], align 4 ; ASSUMPTIONS-OFF-NEXT: ret void ; ASSUMPTIONS-OFF: false2: ; ASSUMPTIONS-OFF-NEXT: store volatile i64 1, i64* [[PTR]], align 4 -; ASSUMPTIONS-OFF-NEXT: store volatile i64 0, i64* [[PTR]], align 8 -; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4 -; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4 -; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4 -; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4 -; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4 -; ASSUMPTIONS-OFF-NEXT: store volatile i64 3, i64* [[PTR]], align 4 -; ASSUMPTIONS-OFF-NEXT: ret void +; ASSUMPTIONS-OFF-NEXT: br label [[COMMON_RET]] ; ; ASSUMPTIONS-ON-LABEL: @caller1( -; ASSUMPTIONS-ON-NEXT: br i1 [[C:%.*]], label [[TRUE2_CRITEDGE:%.*]], label [[FALSE1:%.*]] +; ASSUMPTIONS-ON-NEXT: br i1 [[C:%.*]], label [[COMMON_RET:%.*]], label [[FALSE1:%.*]] ; ASSUMPTIONS-ON: false1: ; ASSUMPTIONS-ON-NEXT: store volatile i64 1, i64* [[PTR:%.*]], align 8 +; ASSUMPTIONS-ON-NEXT: br label [[COMMON_RET]] +; ASSUMPTIONS-ON: common.ret: +; ASSUMPTIONS-ON-NEXT: [[DOTSINK:%.*]] = phi i64 [ 3, [[FALSE1]] ], [ 2, [[TMP0:%.*]] ] ; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 true) [ "align"(i64* [[PTR]], i64 8) ] ; ASSUMPTIONS-ON-NEXT: store volatile i64 0, i64* [[PTR]], align 8 ; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 @@ -48,17 +45,7 @@ ; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 ; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 ; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 -; ASSUMPTIONS-ON-NEXT: store volatile i64 3, i64* [[PTR]], align 8 -; ASSUMPTIONS-ON-NEXT: ret void -; ASSUMPTIONS-ON: true2.critedge: -; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 true) [ "align"(i64* [[PTR]], i64 8) ] -; ASSUMPTIONS-ON-NEXT: store volatile i64 0, i64* [[PTR]], align 8 -; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 -; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 -; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 -; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 -; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 -; ASSUMPTIONS-ON-NEXT: store volatile i64 2, i64* [[PTR]], align 8 +; ASSUMPTIONS-ON-NEXT: store volatile i64 [[DOTSINK]], i64* [[PTR]], align 8 ; ASSUMPTIONS-ON-NEXT: ret void ; br i1 %c, label %true1, label %false1 diff --git a/llvm/test/Transforms/PruneEH/ipo-nounwind.ll b/llvm/test/Transforms/PruneEH/ipo-nounwind.ll --- a/llvm/test/Transforms/PruneEH/ipo-nounwind.ll +++ b/llvm/test/Transforms/PruneEH/ipo-nounwind.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; 
RUN: opt -S -prune-eh -enable-new-pm=0 < %s | FileCheck %s -; RUN: opt -S -passes='function-attrs,function(simplify-cfg)' < %s | FileCheck %s +; RUN: opt -S -prune-eh -enable-new-pm=0 < %s | FileCheck --check-prefixes=ALL,OLDPM %s +; RUN: opt -S -passes='function-attrs,function(simplify-cfg)' < %s | FileCheck --check-prefixes=ALL,NEWPM %s declare void @may_throw() @@ -23,25 +23,36 @@ ; } define linkonce_odr void @callee(i32* %ptr) noinline { -; CHECK-LABEL: @callee( -; CHECK-NEXT: ret void +; ALL-LABEL: @callee( +; ALL-NEXT: ret void ; ret void } define i32 @caller(i32* %ptr) personality i32 3 { -; CHECK-LABEL: @caller( -; CHECK-NEXT: entry: -; CHECK-NEXT: invoke void @callee(i32* [[PTR:%.*]]) -; CHECK-NEXT: to label [[NORMAL:%.*]] unwind label [[UNWIND:%.*]] -; CHECK: normal: -; CHECK-NEXT: ret i32 1 -; CHECK: unwind: -; CHECK-NEXT: [[RES:%.*]] = landingpad { i8*, i32 } -; CHECK-NEXT: cleanup -; CHECK-NEXT: ret i32 2 +; OLDPM-LABEL: @caller( +; OLDPM-NEXT: entry: +; OLDPM-NEXT: invoke void @callee(i32* [[PTR:%.*]]) +; OLDPM-NEXT: to label [[NORMAL:%.*]] unwind label [[UNWIND:%.*]] +; OLDPM: normal: +; OLDPM-NEXT: ret i32 1 +; OLDPM: unwind: +; OLDPM-NEXT: [[RES:%.*]] = landingpad { i8*, i32 } +; OLDPM-NEXT: cleanup +; OLDPM-NEXT: ret i32 2 +; +; NEWPM-LABEL: @caller( +; NEWPM-NEXT: entry: +; NEWPM-NEXT: invoke void @callee(i32* [[PTR:%.*]]) +; NEWPM-NEXT: to label [[COMMON_RET:%.*]] unwind label [[UNWIND:%.*]] +; NEWPM: common.ret: +; NEWPM-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ 2, [[UNWIND]] ], [ 1, [[ENTRY:%.*]] ] +; NEWPM-NEXT: ret i32 [[COMMON_RET_OP]] +; NEWPM: unwind: +; NEWPM-NEXT: [[RES:%.*]] = landingpad { i8*, i32 } +; NEWPM-NEXT: cleanup +; NEWPM-NEXT: br label [[COMMON_RET]] ; - entry: invoke void @callee(i32* %ptr) to label %normal unwind label %unwind diff --git a/llvm/test/Transforms/SimplifyCFG/DeadSetCC.ll b/llvm/test/Transforms/SimplifyCFG/DeadSetCC.ll --- a/llvm/test/Transforms/SimplifyCFG/DeadSetCC.ll +++ b/llvm/test/Transforms/SimplifyCFG/DeadSetCC.ll @@ -13,12 +13,14 @@ ; CHECK-NEXT: [[V_OFF:%.*]] = add i32 [[V:%.*]], -15 ; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i32 [[V_OFF]], 2 ; CHECK-NEXT: br i1 [[SWITCH]], label [[L2:%.*]], label [[L1:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: L1: ; CHECK-NEXT: call void @foo() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: L2: ; CHECK-NEXT: call void @bar() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %C = icmp eq i32 %V, 18 %D = icmp eq i32 %V, 180 diff --git a/llvm/test/Transforms/SimplifyCFG/FoldValueComparisonIntoPredecessors-domtree-preservation-edgecase-2.ll b/llvm/test/Transforms/SimplifyCFG/FoldValueComparisonIntoPredecessors-domtree-preservation-edgecase-2.ll --- a/llvm/test/Transforms/SimplifyCFG/FoldValueComparisonIntoPredecessors-domtree-preservation-edgecase-2.ll +++ b/llvm/test/Transforms/SimplifyCFG/FoldValueComparisonIntoPredecessors-domtree-preservation-edgecase-2.ll @@ -13,7 +13,7 @@ ; CHECK-NEXT: to label [[BB14:%.*]] unwind label [[BB21:%.*]] ; CHECK: bb14: ; CHECK-NEXT: [[I0:%.*]] = invoke i16 @baz() -; CHECK-NEXT: to label [[BB15:%.*]] unwind label [[BB25:%.*]] +; CHECK-NEXT: to label [[BB15:%.*]] unwind label [[BB21]] ; CHECK: bb15: ; CHECK-NEXT: switch i16 [[I0]], label [[BB19:%.*]] [ ; CHECK-NEXT: i16 42, label [[BB23:%.*]] @@ -21,22 +21,18 @@ ; CHECK-NEXT: ] ; CHECK: bb19: ; CHECK-NEXT: invoke void @snork() -; CHECK-NEXT: to label [[BB20:%.*]] unwind label [[BB25]] +; CHECK-NEXT: to label [[BB20:%.*]] unwind label [[BB21]] ; CHECK: bb20: 
; CHECK-NEXT: unreachable +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: bb21: ; CHECK-NEXT: [[I22:%.*]] = landingpad { i8*, i32 } ; CHECK-NEXT: cleanup -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: bb23: ; CHECK-NEXT: invoke void @spam() -; CHECK-NEXT: to label [[BB24:%.*]] unwind label [[BB25]] -; CHECK: bb24: -; CHECK-NEXT: ret void -; CHECK: bb25: -; CHECK-NEXT: [[I26:%.*]] = landingpad { i8*, i32 } -; CHECK-NEXT: cleanup -; CHECK-NEXT: br label [[BB24]] +; CHECK-NEXT: to label [[COMMON_RET]] unwind label [[BB21]] ; bb: invoke void @widget() diff --git a/llvm/test/Transforms/SimplifyCFG/Hexagon/switch-to-lookup-table.ll b/llvm/test/Transforms/SimplifyCFG/Hexagon/switch-to-lookup-table.ll --- a/llvm/test/Transforms/SimplifyCFG/Hexagon/switch-to-lookup-table.ll +++ b/llvm/test/Transforms/SimplifyCFG/Hexagon/switch-to-lookup-table.ll @@ -11,13 +11,14 @@ ; ENABLE-LABEL: @foo( ; ENABLE-NEXT: entry: ; ENABLE-NEXT: [[TMP0:%.*]] = icmp ult i32 [[X:%.*]], 6 -; ENABLE-NEXT: br i1 [[TMP0]], label [[SWITCH_LOOKUP:%.*]], label [[RETURN:%.*]] +; ENABLE-NEXT: br i1 [[TMP0]], label [[SWITCH_LOOKUP:%.*]], label [[COMMON_RET:%.*]] +; ENABLE: common.ret: +; ENABLE-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[SWITCH_LOAD:%.*]], [[SWITCH_LOOKUP]] ], [ 19, [[ENTRY:%.*]] ] +; ENABLE-NEXT: ret i32 [[COMMON_RET_OP]] ; ENABLE: switch.lookup: ; ENABLE-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [6 x i32], [6 x i32]* @switch.table.foo, i32 0, i32 [[X]] -; ENABLE-NEXT: [[SWITCH_LOAD:%.*]] = load i32, i32* [[SWITCH_GEP]], align 4 -; ENABLE-NEXT: ret i32 [[SWITCH_LOAD]] -; ENABLE: return: -; ENABLE-NEXT: ret i32 19 +; ENABLE-NEXT: [[SWITCH_LOAD]] = load i32, i32* [[SWITCH_GEP]], align 4 +; ENABLE-NEXT: br label [[COMMON_RET]] ; ; DISABLE-LABEL: @foo( ; DISABLE-NEXT: entry: diff --git a/llvm/test/Transforms/SimplifyCFG/HoistCode.ll b/llvm/test/Transforms/SimplifyCFG/HoistCode.ll --- a/llvm/test/Transforms/SimplifyCFG/HoistCode.ll +++ b/llvm/test/Transforms/SimplifyCFG/HoistCode.ll @@ -4,6 +4,7 @@ define void @foo(i1 %C, i32* %P) { ; CHECK-LABEL: @foo( +; CHECK-NEXT: common.ret: ; CHECK-NEXT: store i32 7, i32* [[P:%.*]], align 4 ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/SimplifyCFG/basictest.ll b/llvm/test/Transforms/SimplifyCFG/basictest.ll --- a/llvm/test/Transforms/SimplifyCFG/basictest.ll +++ b/llvm/test/Transforms/SimplifyCFG/basictest.ll @@ -34,12 +34,12 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[A]], 0 ; CHECK-NEXT: [[OR_COND:%.*]] = and i1 [[CMP1]], [[CMP2]] -; CHECK-NEXT: br i1 [[OR_COND]], label [[ELSE:%.*]], label [[UNTAKEN:%.*]] +; CHECK-NEXT: br i1 [[OR_COND]], label [[ELSE:%.*]], label [[COMMON_RET:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: else: ; CHECK-NEXT: call void @foo() -; CHECK-NEXT: ret void -; CHECK: untaken: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %cmp1 = icmp eq i32 %a, %b br i1 %cmp1, label %taken, label %untaken diff --git a/llvm/test/Transforms/SimplifyCFG/branch-fold.ll b/llvm/test/Transforms/SimplifyCFG/branch-fold.ll --- a/llvm/test/Transforms/SimplifyCFG/branch-fold.ll +++ b/llvm/test/Transforms/SimplifyCFG/branch-fold.ll @@ -6,12 +6,12 @@ ; CHECK-NEXT: entry: ; CHECK-NEXT: [[A_NOT:%.*]] = xor i1 [[A:%.*]], true ; CHECK-NEXT: [[BRMERGE:%.*]] = select i1 [[A_NOT]], i1 true, i1 [[B:%.*]] -; CHECK-NEXT: br i1 [[BRMERGE]], label [[B:%.*]], label [[C:%.*]] +; CHECK-NEXT: br i1 [[BRMERGE]], label [[B:%.*]], label 
[[COMMON_RET:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: b: ; CHECK-NEXT: store i32 123, i32* [[P:%.*]], align 4 -; CHECK-NEXT: ret void -; CHECK: c: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: diff --git a/llvm/test/Transforms/SimplifyCFG/branch-phi-thread.ll b/llvm/test/Transforms/SimplifyCFG/branch-phi-thread.ll --- a/llvm/test/Transforms/SimplifyCFG/branch-phi-thread.ll +++ b/llvm/test/Transforms/SimplifyCFG/branch-phi-thread.ll @@ -14,12 +14,15 @@ ; CHECK-NEXT: E: ; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[X:%.*]], 0 ; CHECK-NEXT: br i1 [[C]], label [[B:%.*]], label [[F:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ 345, [[B]] ], [ 123, [[F]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: B: ; CHECK-NEXT: call void @f2() -; CHECK-NEXT: ret i32 345 +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @f3() -; CHECK-NEXT: ret i32 123 +; CHECK-NEXT: br label [[COMMON_RET]] ; E: %C = icmp eq i32 %X, 0 ; [#uses=2] @@ -42,12 +45,15 @@ ; CHECK-NEXT: E: ; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[X:%.*]], 0 ; CHECK-NEXT: br i1 [[C]], label [[B:%.*]], label [[F:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ 345, [[B]] ], [ 123, [[F]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: B: ; CHECK-NEXT: call void @f2() -; CHECK-NEXT: ret i32 345 +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @f3() -; CHECK-NEXT: ret i32 123 +; CHECK-NEXT: br label [[COMMON_RET]] ; E: %C = icmp eq i32 %X, 0 ; [#uses=2] @@ -71,15 +77,18 @@ ; CHECK-NEXT: E: ; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[X:%.*]], 0 ; CHECK-NEXT: br i1 [[C]], label [[B_CRITEDGE:%.*]], label [[F:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ 345, [[B_CRITEDGE]] ], [ 123, [[F]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: B.critedge: ; CHECK-NEXT: call void @f3() ; CHECK-NEXT: [[XX_C:%.*]] = load i32, i32* [[AP:%.*]], align 4 ; CHECK-NEXT: store i32 [[XX_C]], i32* [[BP:%.*]], align 4 ; CHECK-NEXT: call void @f2() -; CHECK-NEXT: ret i32 345 +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @f3() -; CHECK-NEXT: ret i32 123 +; CHECK-NEXT: br label [[COMMON_RET]] ; E: %C = icmp eq i32 %X, 0 ; [#uses=2] diff --git a/llvm/test/Transforms/SimplifyCFG/duplicate-landingpad.ll b/llvm/test/Transforms/SimplifyCFG/duplicate-landingpad.ll --- a/llvm/test/Transforms/SimplifyCFG/duplicate-landingpad.ll +++ b/llvm/test/Transforms/SimplifyCFG/duplicate-landingpad.ll @@ -13,14 +13,14 @@ ; CHECK-NEXT: to label [[INVOKE2:%.*]] unwind label [[LPAD2:%.*]] ; CHECK: invoke2: ; CHECK-NEXT: invoke void @fn() -; CHECK-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD2]] -; CHECK: invoke.cont: +; CHECK-NEXT: to label [[COMMON_RET:%.*]] unwind label [[LPAD2]] +; CHECK: common.ret: ; CHECK-NEXT: ret void ; CHECK: lpad2: ; CHECK-NEXT: [[EXN2:%.*]] = landingpad { i8*, i32 } ; CHECK-NEXT: cleanup ; CHECK-NEXT: call void @fn() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: invoke void @fn() @@ -56,8 +56,8 @@ ; CHECK-NEXT: to label [[INVOKE2:%.*]] unwind label [[LPAD1:%.*]] ; CHECK: invoke2: ; CHECK-NEXT: invoke void @fn() -; CHECK-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD2:%.*]] -; CHECK: invoke.cont: +; CHECK-NEXT: to label [[COMMON_RET:%.*]] unwind label [[LPAD2:%.*]] +; CHECK: common.ret: ; CHECK-NEXT: ret void ; CHECK: lpad1: ; CHECK-NEXT: [[EXN:%.*]] = landingpad { i8*, i32 } @@ -70,7 +70,7 @@ ; CHECK-NEXT: 
br label [[SHARED_RESUME]] ; CHECK: shared_resume: ; CHECK-NEXT: call void @fn() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: invoke void @fn() @@ -107,8 +107,8 @@ ; CHECK-NEXT: to label [[INVOKE2:%.*]] unwind label [[LPAD1:%.*]] ; CHECK: invoke2: ; CHECK-NEXT: invoke void @fn() -; CHECK-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD2:%.*]] -; CHECK: invoke.cont: +; CHECK-NEXT: to label [[COMMON_RET:%.*]] unwind label [[LPAD2:%.*]] +; CHECK: common.ret: ; CHECK-NEXT: ret void ; CHECK: lpad1: ; CHECK-NEXT: [[EXN:%.*]] = landingpad { i8*, i32 } @@ -120,7 +120,7 @@ ; CHECK-NEXT: br label [[SHARED_RESUME]] ; CHECK: shared_resume: ; CHECK-NEXT: call void @fn() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: invoke void @fn() diff --git a/llvm/test/Transforms/SimplifyCFG/fold-branch-to-common-dest-two-preds-cost.ll b/llvm/test/Transforms/SimplifyCFG/fold-branch-to-common-dest-two-preds-cost.ll --- a/llvm/test/Transforms/SimplifyCFG/fold-branch-to-common-dest-two-preds-cost.ll +++ b/llvm/test/Transforms/SimplifyCFG/fold-branch-to-common-dest-two-preds-cost.ll @@ -26,12 +26,14 @@ ; THR1-NEXT: [[V3_ADJ:%.*]] = add i8 [[V1]], [[V2]] ; THR1-NEXT: [[C3:%.*]] = icmp eq i8 [[V3_ADJ]], 0 ; THR1-NEXT: br i1 [[C3]], label [[FINAL_LEFT]], label [[FINAL_RIGHT]] +; THR1: common.ret: +; THR1-NEXT: ret void ; THR1: final_left: ; THR1-NEXT: call void @sideeffect0() -; THR1-NEXT: ret void +; THR1-NEXT: br label [[COMMON_RET:%.*]] ; THR1: final_right: ; THR1-NEXT: call void @sideeffect1() -; THR1-NEXT: ret void +; THR1-NEXT: br label [[COMMON_RET]] ; ; THR2-LABEL: @two_preds_with_extra_op( ; THR2-NEXT: entry: @@ -49,12 +51,14 @@ ; THR2-NEXT: [[C3:%.*]] = icmp eq i8 [[V3_ADJ]], 0 ; THR2-NEXT: [[OR_COND:%.*]] = select i1 [[C2]], i1 [[C3]], i1 false ; THR2-NEXT: br i1 [[OR_COND]], label [[FINAL_LEFT]], label [[FINAL_RIGHT]] +; THR2: common.ret: +; THR2-NEXT: ret void ; THR2: final_left: ; THR2-NEXT: call void @sideeffect0() -; THR2-NEXT: ret void +; THR2-NEXT: br label [[COMMON_RET:%.*]] ; THR2: final_right: ; THR2-NEXT: call void @sideeffect1() -; THR2-NEXT: ret void +; THR2-NEXT: br label [[COMMON_RET]] ; entry: %c0 = icmp eq i8 %v0, 0 @@ -99,12 +103,14 @@ ; ALL-NEXT: [[V3_ADJ_OLD:%.*]] = add i8 [[V1]], [[V2]] ; ALL-NEXT: [[C3_OLD:%.*]] = icmp eq i8 [[V3_ADJ_OLD]], 0 ; ALL-NEXT: br i1 [[C3_OLD]], label [[FINAL_LEFT]], label [[FINAL_RIGHT]] +; ALL: common.ret: +; ALL-NEXT: ret void ; ALL: final_left: ; ALL-NEXT: call void @sideeffect0() -; ALL-NEXT: ret void +; ALL-NEXT: br label [[COMMON_RET:%.*]] ; ALL: final_right: ; ALL-NEXT: call void @sideeffect1() -; ALL-NEXT: ret void +; ALL-NEXT: br label [[COMMON_RET]] ; entry: %c0 = icmp eq i8 %v0, 0 diff --git a/llvm/test/Transforms/SimplifyCFG/fold-branch-to-common-dest.ll b/llvm/test/Transforms/SimplifyCFG/fold-branch-to-common-dest.ll --- a/llvm/test/Transforms/SimplifyCFG/fold-branch-to-common-dest.ll +++ b/llvm/test/Transforms/SimplifyCFG/fold-branch-to-common-dest.ll @@ -16,12 +16,14 @@ ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[V1:%.*]], 0 ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[C0]], i1 [[C1]], i1 false ; CHECK-NEXT: br i1 [[OR_COND]], label [[FINAL_LEFT:%.*]], label [[FINAL_RIGHT:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: final_left: ; CHECK-NEXT: call void @sideeffect0() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; pred: %c0 = icmp eq i8 
%v0, 0 @@ -52,12 +54,14 @@ ; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[V3]], 0 ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[C2]], i1 [[C3]], i1 false ; CHECK-NEXT: br i1 [[OR_COND]], label [[FINAL_LEFT]], label [[FINAL_RIGHT]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: final_left: ; CHECK-NEXT: call void @sideeffect0() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %c0 = icmp eq i8 %v0, 0 @@ -89,12 +93,14 @@ ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[V1_ADJ]], 0 ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[C0]], i1 [[C1]], i1 false ; CHECK-NEXT: br i1 [[OR_COND]], label [[FINAL_LEFT:%.*]], label [[FINAL_RIGHT:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: final_left: ; CHECK-NEXT: call void @sideeffect0() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; pred: %c0 = icmp eq i8 %v0, 0 @@ -128,12 +134,14 @@ ; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[V3_ADJ]], 0 ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[C2]], i1 [[C3]], i1 false ; CHECK-NEXT: br i1 [[OR_COND]], label [[FINAL_LEFT]], label [[FINAL_RIGHT]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: final_left: ; CHECK-NEXT: call void @sideeffect0() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %c0 = icmp eq i8 %v0, 0 @@ -167,12 +175,14 @@ ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[V1_ADJ_ADJ]], 0 ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[C0]], i1 [[C1]], i1 false ; CHECK-NEXT: br i1 [[OR_COND]], label [[FINAL_LEFT:%.*]], label [[FINAL_RIGHT:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: final_left: ; CHECK-NEXT: call void @sideeffect0() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; pred: %c0 = icmp eq i8 %v0, 0 @@ -209,12 +219,14 @@ ; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[V3_ADJ_ADJ]], 0 ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[C2]], i1 [[C3]], i1 false ; CHECK-NEXT: br i1 [[OR_COND]], label [[FINAL_LEFT]], label [[FINAL_RIGHT]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: final_left: ; CHECK-NEXT: call void @sideeffect0() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %c0 = icmp eq i8 %v0, 0 @@ -249,13 +261,15 @@ ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[V1_ADJ]], 0 ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[C0]], i1 [[C1]], i1 false ; CHECK-NEXT: br i1 [[OR_COND]], label [[FINAL_LEFT:%.*]], label [[FINAL_RIGHT:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: final_left: ; CHECK-NEXT: call void @sideeffect0() ; CHECK-NEXT: call void @use8(i8 [[V1_ADJ]]) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; pred: %c0 = icmp eq i8 %v0, 0 @@ -280,14 +294,16 @@ ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[V1_ADJ]], 0 ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[C0]], i1 [[C1]], i1 false ; CHECK-NEXT: br i1 
[[OR_COND]], label [[FINAL_LEFT:%.*]], label [[FINAL_RIGHT:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: final_left: ; CHECK-NEXT: call void @sideeffect0() ; CHECK-NEXT: call void @use8(i8 [[V1_ADJ]]) ; CHECK-NEXT: call void @use8(i8 [[V1_ADJ]]) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; pred: %c0 = icmp eq i8 %v0, 0 @@ -321,14 +337,16 @@ ; CHECK-NEXT: call void @sideeffect0() ; CHECK-NEXT: call void @use8(i8 [[V2_ADJ]]) ; CHECK-NEXT: br label [[LEFT_END]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: left_end: ; CHECK-NEXT: [[MERGE_LEFT:%.*]] = phi i8 [ [[V2_ADJ]], [[FINAL_LEFT]] ], [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: call void @sideeffect1() ; CHECK-NEXT: call void @use8(i8 [[MERGE_LEFT]]) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect2() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %c0 = icmp eq i8 %v0, 0 @@ -372,14 +390,16 @@ ; CHECK-NEXT: [[V3_ADJ_OLD:%.*]] = add i8 [[V1]], [[V2]] ; CHECK-NEXT: [[C3_OLD:%.*]] = icmp eq i8 [[V3_ADJ_OLD]], 0 ; CHECK-NEXT: br i1 [[C3_OLD]], label [[FINAL_LEFT]], label [[FINAL_RIGHT]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: final_left: ; CHECK-NEXT: [[MERGE_LEFT:%.*]] = phi i8 [ [[V3_ADJ_OLD]], [[DISPATCH]] ], [ 0, [[PRED0]] ], [ [[V3_ADJ]], [[PRED1]] ] ; CHECK-NEXT: call void @use8(i8 [[MERGE_LEFT]]) ; CHECK-NEXT: call void @sideeffect0() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %c0 = icmp eq i8 %v0, 0 @@ -422,16 +442,18 @@ ; CHECK-NEXT: [[V3_ADJ_OLD:%.*]] = add i8 [[V1]], [[V2]] ; CHECK-NEXT: [[C3_OLD:%.*]] = icmp eq i8 [[V3_ADJ_OLD]], 0 ; CHECK-NEXT: br i1 [[C3_OLD]], label [[FINAL_LEFT]], label [[FINAL_RIGHT]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: final_left: ; CHECK-NEXT: [[MERGE_LEFT:%.*]] = phi i8 [ [[V3_ADJ_OLD]], [[DISPATCH]] ], [ 0, [[PRED0]] ], [ [[V3_ADJ]], [[PRED1]] ] ; CHECK-NEXT: [[MERGE_LEFT_2:%.*]] = phi i8 [ [[V3_ADJ_OLD]], [[DISPATCH]] ], [ 42, [[PRED0]] ], [ [[V3_ADJ]], [[PRED1]] ] ; CHECK-NEXT: call void @use8(i8 [[MERGE_LEFT]]) ; CHECK-NEXT: call void @use8(i8 [[MERGE_LEFT_2]]) ; CHECK-NEXT: call void @sideeffect0() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %c0 = icmp eq i8 %v0, 0 @@ -469,13 +491,15 @@ ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[V1]], 0 ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[C0]], i1 [[C1]], i1 false ; CHECK-NEXT: br i1 [[OR_COND]], label [[FINAL_LEFT:%.*]], label [[FINAL_RIGHT:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: final_left: ; CHECK-NEXT: call void @sideeffect0() ; CHECK-NEXT: call void @use8(i8 [[V1_ADJ]]) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; pred: %c0 = icmp eq i8 %v0, 0 @@ -500,14 +524,16 @@ ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[V1]], 0 ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[C0]], i1 [[C1]], i1 false ; CHECK-NEXT: br i1 [[OR_COND]], label [[FINAL_LEFT:%.*]], label [[FINAL_RIGHT:%.*]] +; CHECK: common.ret: +; 
CHECK-NEXT: ret void ; CHECK: final_left: ; CHECK-NEXT: call void @sideeffect0() ; CHECK-NEXT: call void @use8(i8 [[V1_ADJ]]) ; CHECK-NEXT: call void @use8(i8 [[V1_ADJ]]) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; pred: %c0 = icmp eq i8 %v0, 0 @@ -544,14 +570,16 @@ ; CHECK-NEXT: [[V3_ADJ_OLD:%.*]] = add i8 [[V1]], [[V2]] ; CHECK-NEXT: [[C3_OLD:%.*]] = icmp eq i8 [[V3]], 0 ; CHECK-NEXT: br i1 [[C3_OLD]], label [[FINAL_LEFT]], label [[FINAL_RIGHT]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: final_left: ; CHECK-NEXT: [[MERGE_LEFT:%.*]] = phi i8 [ [[V3_ADJ_OLD]], [[DISPATCH]] ], [ 0, [[PRED0]] ], [ [[V3_ADJ]], [[PRED1]] ] ; CHECK-NEXT: call void @use8(i8 [[MERGE_LEFT]]) ; CHECK-NEXT: call void @sideeffect0() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %c0 = icmp eq i8 %v0, 0 @@ -594,16 +622,18 @@ ; CHECK-NEXT: [[V3_ADJ_OLD:%.*]] = add i8 [[V1]], [[V2]] ; CHECK-NEXT: [[C3_OLD:%.*]] = icmp eq i8 [[V3]], 0 ; CHECK-NEXT: br i1 [[C3_OLD]], label [[FINAL_LEFT]], label [[FINAL_RIGHT]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: final_left: ; CHECK-NEXT: [[MERGE_LEFT:%.*]] = phi i8 [ [[V3_ADJ_OLD]], [[DISPATCH]] ], [ 0, [[PRED0]] ], [ [[V3_ADJ]], [[PRED1]] ] ; CHECK-NEXT: [[MERGE_LEFT_2:%.*]] = phi i8 [ [[V3_ADJ_OLD]], [[DISPATCH]] ], [ 42, [[PRED0]] ], [ [[V3_ADJ]], [[PRED1]] ] ; CHECK-NEXT: call void @use8(i8 [[MERGE_LEFT]]) ; CHECK-NEXT: call void @use8(i8 [[MERGE_LEFT_2]]) ; CHECK-NEXT: call void @sideeffect0() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %c0 = icmp eq i8 %v0, 0 @@ -646,14 +676,16 @@ ; CHECK-NEXT: call void @sideeffect0() ; CHECK-NEXT: call void @use8(i8 [[V2_ADJ]]) ; CHECK-NEXT: br label [[LEFT_END]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: left_end: ; CHECK-NEXT: [[MERGE_LEFT:%.*]] = phi i8 [ [[V2_ADJ]], [[FINAL_LEFT]] ], [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: call void @sideeffect1() ; CHECK-NEXT: call void @use8(i8 [[MERGE_LEFT]]) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect2() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %c0 = icmp eq i8 %v0, 0 @@ -696,14 +728,16 @@ ; CHECK-NEXT: [[C3_OLD:%.*]] = icmp eq i8 [[V3]], 0 ; CHECK-NEXT: [[V3_ADJ_OLD:%.*]] = add i8 [[V4]], [[V5]] ; CHECK-NEXT: br i1 [[C3_OLD]], label [[FINAL_LEFT]], label [[FINAL_RIGHT]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: final_left: ; CHECK-NEXT: [[MERGE_LEFT:%.*]] = phi i8 [ [[V3_ADJ_OLD]], [[DISPATCH]] ], [ 0, [[PRED0]] ], [ [[V3_ADJ]], [[PRED1]] ] ; CHECK-NEXT: call void @use8(i8 [[MERGE_LEFT]]) ; CHECK-NEXT: call void @sideeffect0() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: final_right: ; CHECK-NEXT: call void @sideeffect1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %c0 = icmp eq i8 %v0, 0 diff --git a/llvm/test/Transforms/SimplifyCFG/guards.ll b/llvm/test/Transforms/SimplifyCFG/guards.ll --- a/llvm/test/Transforms/SimplifyCFG/guards.ll +++ b/llvm/test/Transforms/SimplifyCFG/guards.ll @@ -18,12 +18,13 @@ ; Demonstrate that we 
(intentionally) do not simplify a guard on undef ; CHECK-LABEL: @f_1( ; CHECK-NEXT: entry: -; CHECK-NEXT: br i1 [[C:%.*]], label [[TRUE:%.*]], label [[FALSE:%.*]] +; CHECK-NEXT: br i1 [[C:%.*]], label [[TRUE:%.*]], label [[COMMON_RET:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ 10, [[TRUE]] ], [ 20, [[ENTRY:%.*]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: true: ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 undef) [ "deopt"() ] -; CHECK-NEXT: ret i32 10 -; CHECK: false: -; CHECK-NEXT: ret i32 20 +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: @@ -71,8 +72,8 @@ ; CHECK-NEXT: unreachable ; CHECK: merge_block: ; CHECK-NEXT: [[C1:%.*]] = load volatile i1, i1* [[C]], align 1 -; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[C1]], i32 50, i32 100 -; CHECK-NEXT: ret i32 [[SPEC_SELECT]] +; CHECK-NEXT: [[DOT:%.*]] = select i1 [[C1]], i32 50, i32 100 +; CHECK-NEXT: ret i32 [[DOT]] ; entry: %c0 = load volatile i1, i1* %c diff --git a/llvm/test/Transforms/SimplifyCFG/hoist-common-code.ll b/llvm/test/Transforms/SimplifyCFG/hoist-common-code.ll --- a/llvm/test/Transforms/SimplifyCFG/hoist-common-code.ll +++ b/llvm/test/Transforms/SimplifyCFG/hoist-common-code.ll @@ -5,6 +5,7 @@ define void @test(i1 %P, i32* %Q) { ; CHECK-LABEL: @test( +; CHECK-NEXT: common.ret: ; CHECK-NEXT: store i32 1, i32* [[Q:%.*]], align 4 ; CHECK-NEXT: [[A:%.*]] = load i32, i32* [[Q]], align 4 ; CHECK-NEXT: call void @bar(i32 [[A]]) diff --git a/llvm/test/Transforms/SimplifyCFG/indirectbr.ll b/llvm/test/Transforms/SimplifyCFG/indirectbr.ll --- a/llvm/test/Transforms/SimplifyCFG/indirectbr.ll +++ b/llvm/test/Transforms/SimplifyCFG/indirectbr.ll @@ -20,13 +20,15 @@ ; CHECK: BB0: ; CHECK-NEXT: call void @A() ; CHECK-NEXT: br label [[BB1]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: BB1: ; CHECK-NEXT: [[X:%.*]] = phi i32 [ 0, [[BB0]] ], [ 1, [[ENTRY:%.*]] ] ; CHECK-NEXT: call void @B(i32 [[X]]) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: BB2: ; CHECK-NEXT: call void @C() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: store i8* blockaddress(@indbrtest0, %BB0), i8** %P @@ -105,12 +107,14 @@ ; CHECK-LABEL: @indbrtest3( ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 [[COND:%.*]], label [[L1:%.*]], label [[L2:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: L1: ; CHECK-NEXT: call void @A() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: L2: ; CHECK-NEXT: call void @C() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %indirect.goto.dest = select i1 %cond, i8* blockaddress(@indbrtest3, %L1), i8* blockaddress(@indbrtest3, %L2) diff --git a/llvm/test/Transforms/SimplifyCFG/invoke.ll b/llvm/test/Transforms/SimplifyCFG/invoke.ll --- a/llvm/test/Transforms/SimplifyCFG/invoke.ll +++ b/llvm/test/Transforms/SimplifyCFG/invoke.ll @@ -168,13 +168,13 @@ ; CHECK-LABEL: @f6( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[FOO:%.*]] = invoke i32 @fn() -; CHECK-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD:%.*]] -; CHECK: invoke.cont2: +; CHECK-NEXT: to label [[COMMON_RET:%.*]] unwind label [[LPAD:%.*]] +; CHECK: common.ret: ; CHECK-NEXT: ret void ; CHECK: lpad: ; CHECK-NEXT: [[TMP0:%.*]] = landingpad { i8*, i32 } ; CHECK-NEXT: cleanup -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: invoke void @purefn() diff --git a/llvm/test/Transforms/SimplifyCFG/pr39807.ll b/llvm/test/Transforms/SimplifyCFG/pr39807.ll --- a/llvm/test/Transforms/SimplifyCFG/pr39807.ll 
+++ b/llvm/test/Transforms/SimplifyCFG/pr39807.ll @@ -4,7 +4,7 @@ define void @test(i1 %b) personality void()* @personality !dbg !1 { ; CHECK: invoke void @inlinable() -; CHECK-NEXT: to label %success unwind label %failure, !dbg ![[DBGLOC:[0-9]+]] +; CHECK-NEXT: to label %common.ret unwind label %failure, !dbg ![[DBGLOC:[0-9]+]] br i1 %b, label %if, label %else if: diff --git a/llvm/test/Transforms/SimplifyCFG/pr46638.ll b/llvm/test/Transforms/SimplifyCFG/pr46638.ll --- a/llvm/test/Transforms/SimplifyCFG/pr46638.ll +++ b/llvm/test/Transforms/SimplifyCFG/pr46638.ll @@ -11,13 +11,15 @@ ; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[X]], 0 ; CHECK-NEXT: [[EXT:%.*]] = zext i1 [[CMP2]] to i32 ; CHECK-NEXT: call void @dummy(i32 [[EXT]]) +; CHECK-NEXT: br label [[COMMON_RET:%.*]] +; CHECK: common.ret: ; CHECK-NEXT: ret void ; CHECK: true2.critedge: ; CHECK-NEXT: [[CMP2_C:%.*]] = icmp sgt i32 [[X]], 0 ; CHECK-NEXT: [[EXT_C:%.*]] = zext i1 [[CMP2_C]] to i32 ; CHECK-NEXT: call void @dummy(i32 [[EXT_C]]) ; CHECK-NEXT: call void @dummy(i32 2) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %cmp1 = icmp slt i32 %x, 0 call void @llvm.assume(i1 %cmp1) diff --git a/llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll b/llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll --- a/llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll +++ b/llvm/test/Transforms/SimplifyCFG/preserve-branchweights.ll @@ -13,12 +13,14 @@ ; CHECK-NEXT: [[C:%.*]] = or i1 [[B:%.*]], false ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[A_NOT]], i1 [[C]], i1 false ; CHECK-NEXT: br i1 [[OR_COND]], label [[Z:%.*]], label [[Y:%.*]], !prof [[PROF0:![0-9]+]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: Y: ; CHECK-NEXT: call void @helper(i32 0) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: Z: ; CHECK-NEXT: call void @helper(i32 1) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: br i1 %a, label %Y, label %X, !prof !0 @@ -45,12 +47,14 @@ ; CHECK-NEXT: [[C:%.*]] = or i1 [[B:%.*]], false ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[A_NOT]], i1 [[C]], i1 false ; CHECK-NEXT: br i1 [[OR_COND]], label [[Z:%.*]], label [[Y:%.*]], !prof [[PROF1:![0-9]+]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: Y: ; CHECK-NEXT: call void @helper(i32 0) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: Z: ; CHECK-NEXT: call void @helper(i32 1) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: br i1 %a, label %Y, label %X, !prof !12 @@ -73,12 +77,14 @@ ; CHECK-NEXT: [[C:%.*]] = or i1 [[B:%.*]], false ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[A:%.*]], i1 [[C]], i1 false ; CHECK-NEXT: br i1 [[OR_COND]], label [[Z:%.*]], label [[Y:%.*]], !prof [[PROF2:![0-9]+]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: Y: ; CHECK-NEXT: call void @helper(i32 0) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: Z: ; CHECK-NEXT: call void @helper(i32 1) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: br i1 %a, label %X, label %Y, !prof !1 @@ -102,12 +108,14 @@ ; CHECK-NEXT: [[C:%.*]] = or i1 [[B:%.*]], false ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[A:%.*]], i1 [[C]], i1 false ; CHECK-NEXT: br i1 [[OR_COND]], label [[Z:%.*]], label [[Y:%.*]], !prof [[PROF1]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: Y: ; CHECK-NEXT: call void @helper(i32 0) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: Z: ; CHECK-NEXT: call void @helper(i32 1) -; 
CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: br i1 %a, label %X, label %Y, !prof !1 @@ -131,12 +139,14 @@ ; CHECK-NEXT: [[C:%.*]] = or i1 [[B:%.*]], false ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[A:%.*]], i1 [[C]], i1 false ; CHECK-NEXT: br i1 [[OR_COND]], label [[Z:%.*]], label [[Y:%.*]], !prof [[PROF1]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: Y: ; CHECK-NEXT: call void @helper(i32 0) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: Z: ; CHECK-NEXT: call void @helper(i32 1) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: br i1 %a, label %X, label %Y @@ -262,12 +272,14 @@ ; CHECK-NEXT: [[C:%.*]] = or i1 [[B:%.*]], false ; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[A:%.*]], i1 true, i1 [[C]] ; CHECK-NEXT: br i1 [[OR_COND]], label [[Y:%.*]], label [[Z:%.*]], !prof [[PROF5:![0-9]+]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: Y: ; CHECK-NEXT: call void @helper(i32 0) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: Z: ; CHECK-NEXT: call void @helper(i32 1) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: br i1 %a, label %Y, label %X, !prof !0 @@ -291,12 +303,14 @@ ; CHECK-NEXT: [[C:%.*]] = or i1 [[B:%.*]], false ; CHECK-NEXT: [[BRMERGE:%.*]] = select i1 [[A:%.*]], i1 true, i1 [[C]] ; CHECK-NEXT: br i1 [[BRMERGE]], label [[Y:%.*]], label [[Z:%.*]], !prof [[PROF6:![0-9]+]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: Y: ; CHECK-NEXT: call void @helper(i32 0) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: Z: ; CHECK-NEXT: call void @helper(i32 1) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %c = or i1 %b, false @@ -320,12 +334,14 @@ ; CHECK-NEXT: entry: ; CHECK-NEXT: [[LT:%.*]] = icmp slt i64 [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: br i1 [[LT]], label [[A:%.*]], label [[B:%.*]], !prof [[PROF7:![0-9]+]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: a: ; CHECK-NEXT: call void @helper(i32 0) #[[ATTR1:[0-9]+]] -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: b: ; CHECK-NEXT: call void @helper(i32 1) #[[ATTR1]] -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %lt = icmp slt i64 %x, %y @@ -357,16 +373,19 @@ ; CHECK-NEXT: i32 2, label [[END]] ; CHECK-NEXT: i32 92, label [[END]] ; CHECK-NEXT: ], !prof [[PROF8:![0-9]+]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i1 [ [[RETA:%.*]], [[A]] ], [ [[RET:%.*]], [[END]] ] +; CHECK-NEXT: ret i1 [[COMMON_RET_OP]] ; CHECK: a: ; CHECK-NEXT: call void @helper(i32 0) #[[ATTR1]] -; CHECK-NEXT: [[RETA:%.*]] = icmp slt i32 [[X]], [[Y:%.*]] -; CHECK-NEXT: ret i1 [[RETA]] +; CHECK-NEXT: [[RETA]] = icmp slt i32 [[X]], [[Y:%.*]] +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: bees: ; CHECK-NEXT: br label [[END]] ; CHECK: end: -; CHECK-NEXT: [[RET:%.*]] = phi i1 [ true, [[ENTRY:%.*]] ], [ false, [[BEES]] ], [ true, [[ENTRY]] ], [ true, [[ENTRY]] ] +; CHECK-NEXT: [[RET]] = phi i1 [ true, [[ENTRY:%.*]] ], [ false, [[BEES]] ], [ true, [[ENTRY]] ], [ true, [[ENTRY]] ] ; CHECK-NEXT: call void @helper(i32 2) #[[ATTR1]] -; CHECK-NEXT: ret i1 [[RET]] +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: switch i32 %x, label %bees [ @@ -396,12 +415,14 @@ ; CHECK-NEXT: [[X_OFF:%.*]] = add i32 [[X:%.*]], -1 ; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i32 [[X_OFF]], 3 ; CHECK-NEXT: br i1 [[SWITCH]], label [[LOR_END:%.*]], label [[LOR_RHS:%.*]], !prof [[PROF9:![0-9]+]] +; CHECK: common.ret: +; CHECK-NEXT: 
ret void ; CHECK: lor.rhs: ; CHECK-NEXT: call void @helper(i32 1) #[[ATTR1]] -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: lor.end: ; CHECK-NEXT: call void @helper(i32 0) #[[ATTR1]] -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: switch i32 %x, label %lor.rhs [ @@ -426,12 +447,14 @@ ; CHECK-NEXT: [[I:%.*]] = shl i32 [[X:%.*]], 1 ; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[I]], 24 ; CHECK-NEXT: br i1 [[COND]], label [[C:%.*]], label [[A:%.*]], !prof [[PROF10:![0-9]+]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: a: ; CHECK-NEXT: call void @helper(i32 0) #[[ATTR1]] -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: c: ; CHECK-NEXT: call void @helper(i32 2) #[[ATTR1]] -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %i = shl i32 %x, 1 switch i32 %i, label %a [ diff --git a/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll b/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll --- a/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch-dead-default.ll @@ -9,12 +9,14 @@ ; CHECK-NEXT: [[A_OFF:%.*]] = add i1 [[A:%.*]], true ; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i1 [[A_OFF]], true ; CHECK-NEXT: br i1 [[SWITCH]], label [[TRUE:%.*]], label [[FALSE:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: true: ; CHECK-NEXT: call void @foo(i32 1) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: false: ; CHECK-NEXT: call void @foo(i32 3) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; switch i1 %a, label %default [i1 1, label %true i1 0, label %false] @@ -37,18 +39,20 @@ ; CHECK-NEXT: i2 -2, label [[CASE2:%.*]] ; CHECK-NEXT: i2 -1, label [[CASE3:%.*]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: case0: ; CHECK-NEXT: call void @foo(i32 0) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: case1: ; CHECK-NEXT: call void @foo(i32 1) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; CHECK: case2: ; CHECK-NEXT: call void @foo(i32 2) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; CHECK: case3: ; CHECK-NEXT: call void @foo(i32 3) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; CHECK: default1: ; CHECK-NEXT: unreachable ; @@ -82,18 +86,20 @@ ; CHECK-NEXT: i2 1, label [[CASE1:%.*]] ; CHECK-NEXT: i2 -2, label [[CASE2:%.*]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: case0: ; CHECK-NEXT: call void @foo(i32 0) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: case1: ; CHECK-NEXT: call void @foo(i32 1) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; CHECK: case2: ; CHECK-NEXT: call void @foo(i32 2) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; CHECK: default: ; CHECK-NEXT: call void @foo(i32 3) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; switch i2 %a, label %default [i2 0, label %case0 i2 1, label %case1 @@ -121,15 +127,17 @@ ; CHECK-NEXT: i128 0, label [[CASE0:%.*]] ; CHECK-NEXT: i128 1, label [[CASE1:%.*]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: case0: ; CHECK-NEXT: call void @foo(i32 0) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: case1: ; CHECK-NEXT: call void @foo(i32 1) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; CHECK: default: ; CHECK-NEXT: call void @foo(i32 2) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label 
[[COMMON_RET]] ; switch i128 %a, label %default [i128 0, label %case0 i128 1, label %case1] @@ -153,12 +161,14 @@ ; CHECK-NEXT: [[A_OFF:%.*]] = add i8 [[A]], -1 ; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[A_OFF]], 1 ; CHECK-NEXT: br i1 [[SWITCH]], label [[TRUE:%.*]], label [[FALSE:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: true: ; CHECK-NEXT: call void @foo(i32 1) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: false: ; CHECK-NEXT: call void @foo(i32 3) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %cmp = icmp ult i8 %a, 2 call void @llvm.assume(i1 %cmp) @@ -184,12 +194,14 @@ ; CHECK-NEXT: [[A_OFF:%.*]] = add i8 [[A]], 1 ; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[A_OFF]], 1 ; CHECK-NEXT: br i1 [[SWITCH]], label [[TRUE:%.*]], label [[FALSE:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: true: ; CHECK-NEXT: call void @foo(i32 1) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: false: ; CHECK-NEXT: call void @foo(i32 3) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %and = and i8 %a, 254 %cmp = icmp eq i8 %and, 254 @@ -217,12 +229,14 @@ ; CHECK-NEXT: [[A_OFF:%.*]] = add i8 [[A]], 1 ; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i8 [[A_OFF]], 1 ; CHECK-NEXT: br i1 [[SWITCH]], label [[TRUE:%.*]], label [[FALSE:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: true: ; CHECK-NEXT: call void @foo(i32 1) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: false: ; CHECK-NEXT: call void @foo(i32 3) -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %and = and i8 %a, 254 %cmp = icmp eq i8 %and, 254 diff --git a/llvm/test/Transforms/SimplifyCFG/switch-on-const-select.ll b/llvm/test/Transforms/SimplifyCFG/switch-on-const-select.ll --- a/llvm/test/Transforms/SimplifyCFG/switch-on-const-select.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch-on-const-select.ll @@ -10,13 +10,16 @@ ; CHECK: switch: ; CHECK-NEXT: [[LT:%.*]] = icmp slt i64 [[X]], [[Y]] ; CHECK-NEXT: br i1 [[LT]], label [[A:%.*]], label [[B]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ 1, [[A]] ], [ [[RETVAL:%.*]], [[B]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: a: ; CHECK-NEXT: tail call void @bees.a() #[[ATTR0:[0-9]+]] -; CHECK-NEXT: ret i32 1 +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: b: -; CHECK-NEXT: [[RETVAL:%.*]] = phi i32 [ 0, [[SWITCH]] ], [ 2, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[RETVAL]] = phi i32 [ 0, [[SWITCH]] ], [ 2, [[ENTRY:%.*]] ] ; CHECK-NEXT: tail call void @bees.b() #[[ATTR0]] -; CHECK-NEXT: ret i32 [[RETVAL]] +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %eq = icmp eq i64 %x, %y @@ -127,8 +130,8 @@ ; CHECK-NEXT: [[EQ:%.*]] = icmp eq i64 [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[LT:%.*]] = icmp slt i64 [[X]], [[Y]] ; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[LT]], i32 -1, i32 1 -; CHECK-NEXT: [[VAL:%.*]] = select i1 [[EQ]], i32 0, i32 [[SPEC_SELECT]] -; CHECK-NEXT: ret i32 [[VAL]] +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = select i1 [[EQ]], i32 0, i32 [[SPEC_SELECT]] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; entry: %eq = icmp eq i64 %x, %y diff --git a/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll b/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll --- a/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch-range-to-icmp.ll @@ -9,12 +9,15 @@ ; CHECK-NEXT: [[X_OFF:%.*]] = add i32 [[X:%.*]], -5 ; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i32 [[X_OFF]], 3 ; 
CHECK-NEXT: br i1 [[SWITCH]], label [[A:%.*]], label [[DEFAULT:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[TMP0:%.*]], [[DEFAULT]] ], [ [[TMP1:%.*]], [[A]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: default: -; CHECK-NEXT: [[TMP0:%.*]] = call i32 @f(i32 0) -; CHECK-NEXT: ret i32 [[TMP0]] +; CHECK-NEXT: [[TMP0]] = call i32 @f(i32 0) +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: a: -; CHECK-NEXT: [[TMP1:%.*]] = call i32 @f(i32 1) -; CHECK-NEXT: ret i32 [[TMP1]] +; CHECK-NEXT: [[TMP1]] = call i32 @f(i32 1) +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: @@ -38,12 +41,15 @@ ; CHECK-NEXT: [[X_OFF:%.*]] = add i32 [[X:%.*]], -5 ; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i32 [[X_OFF]], 3 ; CHECK-NEXT: br i1 [[SWITCH]], label [[A:%.*]], label [[B:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[TMP0:%.*]], [[A]] ], [ [[TMP1:%.*]], [[B]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: a: -; CHECK-NEXT: [[TMP0:%.*]] = call i32 @f(i32 0) -; CHECK-NEXT: ret i32 [[TMP0]] +; CHECK-NEXT: [[TMP0]] = call i32 @f(i32 0) +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: b: -; CHECK-NEXT: [[TMP1:%.*]] = call i32 @f(i32 1) -; CHECK-NEXT: ret i32 [[TMP1]] +; CHECK-NEXT: [[TMP1]] = call i32 @f(i32 1) +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: @@ -73,12 +79,15 @@ ; CHECK-NEXT: [[X_OFF:%.*]] = add i32 [[X:%.*]], -5 ; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i32 [[X_OFF]], 3 ; CHECK-NEXT: br i1 [[SWITCH]], label [[A:%.*]], label [[B:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[TMP0:%.*]], [[A]] ], [ [[TMP1:%.*]], [[B]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: a: -; CHECK-NEXT: [[TMP0:%.*]] = call i32 @f(i32 0) -; CHECK-NEXT: ret i32 [[TMP0]] +; CHECK-NEXT: [[TMP0]] = call i32 @f(i32 0) +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: b: -; CHECK-NEXT: [[TMP1:%.*]] = call i32 @f(i32 1) -; CHECK-NEXT: ret i32 [[TMP1]] +; CHECK-NEXT: [[TMP1]] = call i32 @f(i32 1) +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: diff --git a/llvm/test/Transforms/SimplifyCFG/switch_create-custom-dl.ll b/llvm/test/Transforms/SimplifyCFG/switch_create-custom-dl.ll --- a/llvm/test/Transforms/SimplifyCFG/switch_create-custom-dl.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch_create-custom-dl.ll @@ -12,12 +12,14 @@ ; CHECK-NEXT: i32 17, label [[T:%.*]] ; CHECK-NEXT: i32 4, label [[T]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: T: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %C1 = icmp eq i32 %V, 4 ; [#uses=1] %C2 = icmp eq i32 %V, 17 ; [#uses=1] @@ -38,12 +40,14 @@ ; CHECK-NEXT: i40 17, label [[T:%.*]] ; CHECK-NEXT: i40 4, label [[T]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: T: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %C1 = icmp eq i32* %V, inttoptr (i32 4 to i32*) %C2 = icmp eq i32* %V, inttoptr (i32 17 to i32*) @@ -64,12 +68,14 @@ ; CHECK-NEXT: i40 17, label [[T:%.*]] ; CHECK-NEXT: i40 4, label [[T]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: T: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: 
ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %C1 = icmp eq i32 addrspace(1)* %V, inttoptr (i32 4 to i32 addrspace(1)*) %C2 = icmp eq i32 addrspace(1)* %V, inttoptr (i32 17 to i32 addrspace(1)*) @@ -89,12 +95,14 @@ ; CHECK-NEXT: i32 17, label [[F:%.*]] ; CHECK-NEXT: i32 4, label [[F]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: T: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %C1 = icmp ne i32 %V, 4 ; [#uses=1] %C2 = icmp ne i32 %V, 17 ; [#uses=1] @@ -114,12 +122,14 @@ ; CHECK-NEXT: i32 4, label [[T:%.*]] ; CHECK-NEXT: i32 17, label [[T]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: T: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %C1 = icmp eq i32 %V, 4 ; [#uses=1] br i1 %C1, label %T, label %N @@ -248,15 +258,15 @@ ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X:%.*]], 32 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[SWITCH_EARLY_TEST:%.*]] ; CHECK: switch.early.test: -; CHECK-NEXT: switch i8 [[C:%.*]], label [[IF_END:%.*]] [ +; CHECK-NEXT: switch i8 [[C:%.*]], label [[COMMON_RET:%.*]] [ ; CHECK-NEXT: i8 99, label [[IF_THEN]] ; CHECK-NEXT: i8 97, label [[IF_THEN]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: if.then: ; CHECK-NEXT: tail call void @foo1() #[[ATTR2:[0-9]+]] -; CHECK-NEXT: ret void -; CHECK: if.end: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %cmp = icmp ult i32 %x, 32 @@ -283,16 +293,17 @@ ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X:%.*]], 32 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN]], label [[SWITCH_EARLY_TEST:%.*]] ; CHECK: switch.early.test: -; CHECK-NEXT: switch i8 [[C:%.*]], label [[IF_END:%.*]] [ +; CHECK-NEXT: switch i8 [[C:%.*]], label [[COMMON_RET:%.*]] [ ; CHECK-NEXT: i8 99, label [[IF_THEN]] ; CHECK-NEXT: i8 97, label [[IF_THEN]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[A:%.*]], [[IF_THEN]] ], [ 0, [[SWITCH_EARLY_TEST]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: if.then: -; CHECK-NEXT: [[A:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ 42, [[SWITCH_EARLY_TEST]] ], [ 42, [[N]] ], [ 42, [[SWITCH_EARLY_TEST]] ] +; CHECK-NEXT: [[A]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ 42, [[SWITCH_EARLY_TEST]] ], [ 42, [[N]] ], [ 42, [[SWITCH_EARLY_TEST]] ] ; CHECK-NEXT: tail call void @foo1() #[[ATTR2]] -; CHECK-NEXT: ret i32 [[A]] -; CHECK: if.end: -; CHECK-NEXT: ret i32 0 +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: br i1 %C, label %N, label %if.then @@ -395,12 +406,15 @@ ; CHECK-NEXT: i32 51, label [[F]] ; CHECK-NEXT: i32 0, label [[F]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ 123, [[T]] ], [ 324, [[F]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: T: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret i32 123 +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: ret i32 324 +; CHECK-NEXT: br label [[COMMON_RET]] ; %A = icmp ne i32 %mode, 0 %B = icmp ne i32 %mode, 51 @@ -636,12 +650,12 @@ ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X:%.*]], 3 ; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i32 [[Y:%.*]], 2 ; CHECK-NEXT: [[OR_COND775:%.*]] = or i1 [[CMP]], [[SWITCH]] -; CHECK-NEXT: br i1 
[[OR_COND775]], label [[LOR_LHS_FALSE8:%.*]], label [[RETURN:%.*]] +; CHECK-NEXT: br i1 [[OR_COND775]], label [[LOR_LHS_FALSE8:%.*]], label [[COMMON_RET:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: lor.lhs.false8: ; CHECK-NEXT: tail call void @foo1() -; CHECK-NEXT: ret void -; CHECK: return: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %cmp = icmp ult i32 %x, 3 %switch = icmp ult i32 %y, 2 @@ -735,16 +749,16 @@ define void @test19(i32 %arg) { ; CHECK-LABEL: @test19( -; CHECK-NEXT: switch i32 [[ARG:%.*]], label [[ELSE:%.*]] [ +; CHECK-NEXT: switch i32 [[ARG:%.*]], label [[COMMON_RET:%.*]] [ ; CHECK-NEXT: i32 32, label [[IF:%.*]] ; CHECK-NEXT: i32 13, label [[IF]] ; CHECK-NEXT: i32 12, label [[IF]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: if: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void -; CHECK: else: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %and = and i32 %arg, -2 %cmp1 = icmp eq i32 %and, 12 @@ -767,12 +781,12 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[AND]], 13 ; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[ARG]], 32 ; CHECK-NEXT: [[PRED:%.*]] = or i1 [[CMP1]], [[CMP2]] -; CHECK-NEXT: br i1 [[PRED]], label [[IF:%.*]], label [[ELSE:%.*]] +; CHECK-NEXT: br i1 [[PRED]], label [[IF:%.*]], label [[COMMON_RET:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: if: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void -; CHECK: else: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %and = and i32 %arg, -2 %cmp1 = icmp eq i32 %and, 13 @@ -792,15 +806,15 @@ define void @test21(i32 %arg) { ; CHECK-LABEL: @test21( ; CHECK-NEXT: switch i32 [[ARG:%.*]], label [[IF:%.*]] [ -; CHECK-NEXT: i32 32, label [[ELSE:%.*]] -; CHECK-NEXT: i32 13, label [[ELSE]] -; CHECK-NEXT: i32 12, label [[ELSE]] +; CHECK-NEXT: i32 32, label [[COMMON_RET:%.*]] +; CHECK-NEXT: i32 13, label [[COMMON_RET]] +; CHECK-NEXT: i32 12, label [[COMMON_RET]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: if: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void -; CHECK: else: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %and = or i32 %arg, 1 %cmp1 = icmp ne i32 %and, 13 @@ -823,12 +837,12 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[AND]], 12 ; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[ARG]], 32 ; CHECK-NEXT: [[PRED:%.*]] = and i1 [[CMP1]], [[CMP2]] -; CHECK-NEXT: br i1 [[PRED]], label [[IF:%.*]], label [[ELSE:%.*]] +; CHECK-NEXT: br i1 [[PRED]], label [[IF:%.*]], label [[COMMON_RET:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: if: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void -; CHECK: else: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %and = or i32 %arg, 1 %cmp1 = icmp ne i32 %and, 12 diff --git a/llvm/test/Transforms/SimplifyCFG/switch_create.ll b/llvm/test/Transforms/SimplifyCFG/switch_create.ll --- a/llvm/test/Transforms/SimplifyCFG/switch_create.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch_create.ll @@ -12,12 +12,14 @@ ; CHECK-NEXT: i32 17, label [[T:%.*]] ; CHECK-NEXT: i32 4, label [[T]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: T: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %C1 = icmp eq i32 %V, 4 ; [#uses=1] %C2 = icmp eq i32 %V, 17 ; [#uses=1] @@ -37,12 +39,14 @@ ; CHECK-NEXT: i32 17, label [[T:%.*]] ; CHECK-NEXT: i32 
4, label [[T]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: T: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %C1 = icmp eq i32 %V, 4 %C2 = icmp eq i32 %V, 17 @@ -63,12 +67,14 @@ ; DL-NEXT: i32 17, label [[T:%.*]] ; DL-NEXT: i32 4, label [[T]] ; DL-NEXT: ] +; DL: common.ret: +; DL-NEXT: ret void ; DL: T: ; DL-NEXT: call void @foo1() -; DL-NEXT: ret void +; DL-NEXT: br label [[COMMON_RET:%.*]] ; DL: F: ; DL-NEXT: call void @foo2() -; DL-NEXT: ret void +; DL-NEXT: br label [[COMMON_RET]] ; %C1 = icmp eq i32* %V, inttoptr (i32 4 to i32*) %C2 = icmp eq i32* %V, inttoptr (i32 17 to i32*) @@ -89,12 +95,14 @@ ; DL-NEXT: i16 17, label [[T:%.*]] ; DL-NEXT: i16 4, label [[T]] ; DL-NEXT: ] +; DL: common.ret: +; DL-NEXT: ret void ; DL: T: ; DL-NEXT: call void @foo1() -; DL-NEXT: ret void +; DL-NEXT: br label [[COMMON_RET:%.*]] ; DL: F: ; DL-NEXT: call void @foo2() -; DL-NEXT: ret void +; DL-NEXT: br label [[COMMON_RET]] ; %C1 = icmp eq i32 addrspace(1)* %V, inttoptr (i32 4 to i32 addrspace(1)*) %C2 = icmp eq i32 addrspace(1)* %V, inttoptr (i32 17 to i32 addrspace(1)*) @@ -114,12 +122,14 @@ ; CHECK-NEXT: i32 17, label [[F:%.*]] ; CHECK-NEXT: i32 4, label [[F]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: T: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %C1 = icmp ne i32 %V, 4 ; [#uses=1] %C2 = icmp ne i32 %V, 17 ; [#uses=1] @@ -139,12 +149,14 @@ ; CHECK-NEXT: i32 17, label [[F:%.*]] ; CHECK-NEXT: i32 4, label [[F]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: T: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %C1 = icmp ne i32 %V, 4 %C2 = icmp ne i32 %V, 17 @@ -164,12 +176,14 @@ ; CHECK-NEXT: i32 4, label [[T:%.*]] ; CHECK-NEXT: i32 17, label [[T]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: T: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %C1 = icmp eq i32 %V, 4 ; [#uses=1] br i1 %C1, label %T, label %N @@ -298,15 +312,15 @@ ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X:%.*]], 32 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[SWITCH_EARLY_TEST:%.*]] ; CHECK: switch.early.test: -; CHECK-NEXT: switch i8 [[C:%.*]], label [[IF_END:%.*]] [ +; CHECK-NEXT: switch i8 [[C:%.*]], label [[COMMON_RET:%.*]] [ ; CHECK-NEXT: i8 99, label [[IF_THEN]] ; CHECK-NEXT: i8 97, label [[IF_THEN]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: if.then: ; CHECK-NEXT: tail call void @foo1() #[[ATTR2:[0-9]+]] -; CHECK-NEXT: ret void -; CHECK: if.end: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: %cmp = icmp ult i32 %x, 32 @@ -333,16 +347,17 @@ ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X:%.*]], 32 ; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN]], label [[SWITCH_EARLY_TEST:%.*]] ; CHECK: switch.early.test: -; CHECK-NEXT: switch i8 [[C:%.*]], label [[IF_END:%.*]] [ +; CHECK-NEXT: switch i8 [[C:%.*]], label [[COMMON_RET:%.*]] [ ; 
CHECK-NEXT: i8 99, label [[IF_THEN]] ; CHECK-NEXT: i8 97, label [[IF_THEN]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ [[A:%.*]], [[IF_THEN]] ], [ 0, [[SWITCH_EARLY_TEST]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: if.then: -; CHECK-NEXT: [[A:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ 42, [[SWITCH_EARLY_TEST]] ], [ 42, [[N]] ], [ 42, [[SWITCH_EARLY_TEST]] ] +; CHECK-NEXT: [[A]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ 42, [[SWITCH_EARLY_TEST]] ], [ 42, [[N]] ], [ 42, [[SWITCH_EARLY_TEST]] ] ; CHECK-NEXT: tail call void @foo1() #[[ATTR2]] -; CHECK-NEXT: ret i32 [[A]] -; CHECK: if.end: -; CHECK-NEXT: ret i32 0 +; CHECK-NEXT: br label [[COMMON_RET]] ; entry: br i1 %C, label %N, label %if.then @@ -445,12 +460,15 @@ ; CHECK-NEXT: i32 51, label [[F]] ; CHECK-NEXT: i32 0, label [[F]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ 123, [[T]] ], [ 324, [[F]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: T: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret i32 123 +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: ret i32 324 +; CHECK-NEXT: br label [[COMMON_RET]] ; %A = icmp ne i32 %mode, 0 %B = icmp ne i32 %mode, 51 @@ -474,12 +492,15 @@ ; CHECK-NEXT: i32 51, label [[F]] ; CHECK-NEXT: i32 0, label [[F]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ 123, [[T]] ], [ 324, [[F]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: T: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret i32 123 +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: ret i32 324 +; CHECK-NEXT: br label [[COMMON_RET]] ; %A = icmp ne i32 %mode, 0 %B = icmp ne i32 %mode, 51 @@ -504,12 +525,15 @@ ; CHECK-NEXT: i32 51, label [[F]] ; CHECK-NEXT: i32 0, label [[F]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ 123, [[T]] ], [ 324, [[F]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: T: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret i32 123 +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: ret i32 324 +; CHECK-NEXT: br label [[COMMON_RET]] ; %A = icmp ne i32 %mode, 0 %B = icmp ne i32 %mode, 51 @@ -533,12 +557,15 @@ ; CHECK-NEXT: i32 51, label [[F]] ; CHECK-NEXT: i32 0, label [[F]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: [[COMMON_RET_OP:%.*]] = phi i32 [ 123, [[T]] ], [ 324, [[F]] ] +; CHECK-NEXT: ret i32 [[COMMON_RET_OP]] ; CHECK: T: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret i32 123 +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: F: ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: ret i32 324 +; CHECK-NEXT: br label [[COMMON_RET]] ; %A = icmp ne i32 %mode, 0 %B = icmp ne i32 %mode, 51 @@ -774,12 +801,12 @@ ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X:%.*]], 3 ; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i32 [[Y:%.*]], 2 ; CHECK-NEXT: [[OR_COND775:%.*]] = or i1 [[CMP]], [[SWITCH]] -; CHECK-NEXT: br i1 [[OR_COND775]], label [[LOR_LHS_FALSE8:%.*]], label [[RETURN:%.*]] +; CHECK-NEXT: br i1 [[OR_COND775]], label [[LOR_LHS_FALSE8:%.*]], label [[COMMON_RET:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: lor.lhs.false8: ; CHECK-NEXT: tail call void @foo1() -; CHECK-NEXT: ret void -; CHECK: return: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %cmp = icmp ult i32 %x, 3 %switch = icmp ult i32 %y, 2 @@ -800,12 +827,12 @@ ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 
[[X:%.*]], 3 ; CHECK-NEXT: [[SWITCH:%.*]] = icmp ult i32 [[Y:%.*]], 2 ; CHECK-NEXT: [[OR_COND775:%.*]] = select i1 [[CMP]], i1 true, i1 [[SWITCH]] -; CHECK-NEXT: br i1 [[OR_COND775]], label [[LOR_LHS_FALSE8:%.*]], label [[RETURN:%.*]] +; CHECK-NEXT: br i1 [[OR_COND775]], label [[LOR_LHS_FALSE8:%.*]], label [[COMMON_RET:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: lor.lhs.false8: ; CHECK-NEXT: tail call void @foo1() -; CHECK-NEXT: ret void -; CHECK: return: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %cmp = icmp ult i32 %x, 3 %switch = icmp ult i32 %y, 2 @@ -900,16 +927,16 @@ ; Form a switch when and'ing a negated power of two define void @test19(i32 %arg) { ; CHECK-LABEL: @test19( -; CHECK-NEXT: switch i32 [[ARG:%.*]], label [[ELSE:%.*]] [ +; CHECK-NEXT: switch i32 [[ARG:%.*]], label [[COMMON_RET:%.*]] [ ; CHECK-NEXT: i32 32, label [[IF:%.*]] ; CHECK-NEXT: i32 13, label [[IF]] ; CHECK-NEXT: i32 12, label [[IF]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: if: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void -; CHECK: else: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %and = and i32 %arg, -2 %cmp1 = icmp eq i32 %and, 12 @@ -927,16 +954,16 @@ define void @test19_select(i32 %arg) { ; CHECK-LABEL: @test19_select( -; CHECK-NEXT: switch i32 [[ARG:%.*]], label [[ELSE:%.*]] [ +; CHECK-NEXT: switch i32 [[ARG:%.*]], label [[COMMON_RET:%.*]] [ ; CHECK-NEXT: i32 32, label [[IF:%.*]] ; CHECK-NEXT: i32 13, label [[IF]] ; CHECK-NEXT: i32 12, label [[IF]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: if: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void -; CHECK: else: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %and = and i32 %arg, -2 %cmp1 = icmp eq i32 %and, 12 @@ -959,12 +986,12 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[AND]], 13 ; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[ARG]], 32 ; CHECK-NEXT: [[PRED:%.*]] = or i1 [[CMP1]], [[CMP2]] -; CHECK-NEXT: br i1 [[PRED]], label [[IF:%.*]], label [[ELSE:%.*]] +; CHECK-NEXT: br i1 [[PRED]], label [[IF:%.*]], label [[COMMON_RET:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: if: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void -; CHECK: else: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %and = and i32 %arg, -2 %cmp1 = icmp eq i32 %and, 13 @@ -984,15 +1011,15 @@ define void @test21(i32 %arg) { ; CHECK-LABEL: @test21( ; CHECK-NEXT: switch i32 [[ARG:%.*]], label [[IF:%.*]] [ -; CHECK-NEXT: i32 32, label [[ELSE:%.*]] -; CHECK-NEXT: i32 13, label [[ELSE]] -; CHECK-NEXT: i32 12, label [[ELSE]] +; CHECK-NEXT: i32 32, label [[COMMON_RET:%.*]] +; CHECK-NEXT: i32 13, label [[COMMON_RET]] +; CHECK-NEXT: i32 12, label [[COMMON_RET]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: if: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void -; CHECK: else: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; %and = or i32 %arg, 1 %cmp1 = icmp ne i32 %and, 13 @@ -1015,12 +1042,12 @@ ; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[AND]], 12 ; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[ARG]], 32 ; CHECK-NEXT: [[PRED:%.*]] = and i1 [[CMP1]], [[CMP2]] -; CHECK-NEXT: br i1 [[PRED]], label [[IF:%.*]], label [[ELSE:%.*]] +; CHECK-NEXT: br i1 [[PRED]], label [[IF:%.*]], label [[COMMON_RET:%.*]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: if: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void -; CHECK: else: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label 
[[COMMON_RET]] ; %and = or i32 %arg, 1 %cmp1 = icmp ne i32 %and, 12 diff --git a/llvm/test/Transforms/SimplifyCFG/switch_switch_fold.ll b/llvm/test/Transforms/SimplifyCFG/switch_switch_fold.ll --- a/llvm/test/Transforms/SimplifyCFG/switch_switch_fold.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch_switch_fold.ll @@ -20,15 +20,17 @@ ; CHECK-NEXT: i32 18, label [[B]] ; CHECK-NEXT: i32 42, label [[D:%.*]] ; CHECK-NEXT: ] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: A: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: B: ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; CHECK: D: ; CHECK-NEXT: call void @foo4() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; CHECK: infloop: ; CHECK-NEXT: br label [[INFLOOP]] ; diff --git a/llvm/test/Transforms/SimplifyCFG/switch_thread.ll b/llvm/test/Transforms/SimplifyCFG/switch_thread.ll --- a/llvm/test/Transforms/SimplifyCFG/switch_thread.ll +++ b/llvm/test/Transforms/SimplifyCFG/switch_thread.ll @@ -13,16 +13,16 @@ ; CHECK-LABEL: @test1( ; CHECK-NEXT: switch i32 [[V:%.*]], label [[A:%.*]] [ ; CHECK-NEXT: i32 4, label [[T:%.*]] -; CHECK-NEXT: i32 17, label [[DONE:%.*]] +; CHECK-NEXT: i32 17, label [[COMMON_RET:%.*]] ; CHECK-NEXT: ] ; CHECK: T: ; CHECK-NEXT: call void @foo1() ; CHECK-NEXT: call void @foo2() -; CHECK-NEXT: br label [[DONE]] +; CHECK-NEXT: br label [[COMMON_RET]] ; CHECK: A: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: br label [[DONE]] -; CHECK: Done: +; CHECK-NEXT: br label [[COMMON_RET]] +; CHECK: common.ret: ; CHECK-NEXT: ret void ; switch i32 %V, label %A [ @@ -68,21 +68,21 @@ ; CHECK-NEXT: switch i32 [[V:%.*]], label [[A:%.*]] [ ; CHECK-NEXT: i32 4, label [[T:%.*]] ; CHECK-NEXT: i32 17, label [[D:%.*]] -; CHECK-NEXT: i32 1234, label [[E:%.*]] +; CHECK-NEXT: i32 1234, label [[COMMON_RET:%.*]] ; CHECK-NEXT: ] ; CHECK: A: ; CHECK-NEXT: call void @foo1() ; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[V]], 42 -; CHECK-NEXT: br i1 [[COND]], label [[D]], label [[E]] +; CHECK-NEXT: br i1 [[COND]], label [[D]], label [[COMMON_RET]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: T: ; CHECK-NEXT: call void @foo1() ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; CHECK: D: ; CHECK-NEXT: call void @foo1() -; CHECK-NEXT: ret void -; CHECK: E: -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; switch i32 %V, label %A [ i32 4, label %T diff --git a/llvm/test/Transforms/SimplifyCFG/unprofitable-pr.ll b/llvm/test/Transforms/SimplifyCFG/unprofitable-pr.ll --- a/llvm/test/Transforms/SimplifyCFG/unprofitable-pr.ll +++ b/llvm/test/Transforms/SimplifyCFG/unprofitable-pr.ll @@ -18,6 +18,8 @@ ; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]]) ; CHECK-NEXT: store volatile i64 0, i64* [[PTR]], align 8 ; CHECK-NEXT: store volatile i64 3, i64* [[PTR]], align 8 +; CHECK-NEXT: br label [[COMMON_RET:%.*]] +; CHECK: common.ret: ; CHECK-NEXT: ret void ; CHECK: true2.critedge: ; CHECK-NEXT: [[PTRINT_C:%.*]] = ptrtoint i64* [[PTR]] to i64 @@ -26,7 +28,7 @@ ; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND_C]]) ; CHECK-NEXT: store volatile i64 0, i64* [[PTR]], align 8 ; CHECK-NEXT: store volatile i64 2, i64* [[PTR]], align 8 -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; br i1 %c, label %true1, label %false1 @@ -68,6 +70,8 @@ ; CHECK-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 ; CHECK-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 ; 
CHECK-NEXT: store volatile i64 3, i64* [[PTR]], align 8 +; CHECK-NEXT: br label [[COMMON_RET:%.*]] +; CHECK: common.ret: ; CHECK-NEXT: ret void ; CHECK: true2.critedge: ; CHECK-NEXT: [[PTRINT_C:%.*]] = ptrtoint i64* [[PTR]] to i64 @@ -81,7 +85,7 @@ ; CHECK-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 ; CHECK-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 ; CHECK-NEXT: store volatile i64 2, i64* [[PTR]], align 8 -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; br i1 %c, label %true1, label %false1 @@ -131,12 +135,14 @@ ; CHECK: false1: ; CHECK-NEXT: store volatile i64 1, i64* [[PTR]], align 4 ; CHECK-NEXT: br label [[TRUE1]] +; CHECK: common.ret: +; CHECK-NEXT: ret void ; CHECK: true2: ; CHECK-NEXT: store volatile i64 2, i64* [[PTR]], align 8 -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET:%.*]] ; CHECK: false2: ; CHECK-NEXT: store volatile i64 3, i64* [[PTR]], align 8 -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; br i1 %c, label %true1, label %false1 @@ -184,6 +190,8 @@ ; CHECK-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 ; CHECK-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 ; CHECK-NEXT: store volatile i64 3, i64* [[PTR]], align 8 +; CHECK-NEXT: br label [[COMMON_RET:%.*]] +; CHECK: common.ret: ; CHECK-NEXT: ret void ; CHECK: true2.critedge: ; CHECK-NEXT: [[VTABLEI8_C:%.*]] = bitcast [3 x i8*]* [[VTABLE]] to i8* @@ -196,7 +204,7 @@ ; CHECK-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 ; CHECK-NEXT: store volatile i64 -1, i64* [[PTR]], align 8 ; CHECK-NEXT: store volatile i64 2, i64* [[PTR]], align 8 -; CHECK-NEXT: ret void +; CHECK-NEXT: br label [[COMMON_RET]] ; br i1 %c, label %true1, label %false1