diff --git a/llvm/lib/CodeGen/ShrinkWrap.cpp b/llvm/lib/CodeGen/ShrinkWrap.cpp
--- a/llvm/lib/CodeGen/ShrinkWrap.cpp
+++ b/llvm/lib/CodeGen/ShrinkWrap.cpp
@@ -504,6 +504,10 @@
     }
 
     for (const MachineInstr &MI : MBB) {
+      if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) {
+        LLVM_DEBUG(dbgs() << "inlineasm_br prevents shrink-wrapping\n");
+        return false;
+      }
       if (!useOrDefCSROrFI(MI, RS.get()))
         continue;
       // Save (resp. restore) point must dominate (resp. post dominate)
diff --git a/llvm/test/CodeGen/PowerPC/ppc64-inlineasm-clobber.ll b/llvm/test/CodeGen/PowerPC/ppc64-inlineasm-clobber.ll
--- a/llvm/test/CodeGen/PowerPC/ppc64-inlineasm-clobber.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc64-inlineasm-clobber.ll
@@ -75,43 +75,41 @@
 define dso_local signext i32 @ClobberLR_BR(i32 signext %in) #0 {
 ; PPC64LE-LABEL: ClobberLR_BR:
 ; PPC64LE:       # %bb.0: # %entry
+; PPC64LE-NEXT:    mflr r0
+; PPC64LE-NEXT:    std r0, 16(r1)
+; PPC64LE-NEXT:    stdu r1, -32(r1)
 ; PPC64LE-NEXT:    #APP
 ; PPC64LE-NEXT:    nop
 ; PPC64LE-NEXT:    #NO_APP
-; PPC64LE-NEXT:  # %bb.1: # %return
+; PPC64LE-NEXT:  .LBB3_1: # %return
 ; PPC64LE-NEXT:    extsw r3, r3
-; PPC64LE-NEXT:    blr
-; PPC64LE-NEXT:  .Ltmp0: # Block address taken
-; PPC64LE-NEXT:  .LBB3_2: # %return_early
-; PPC64LE-NEXT:    mflr r0
-; PPC64LE-NEXT:    std r0, 16(r1)
-; PPC64LE-NEXT:    stdu r1, -32(r1)
-; PPC64LE-NEXT:    li r3, 0
 ; PPC64LE-NEXT:    addi r1, r1, 32
 ; PPC64LE-NEXT:    ld r0, 16(r1)
 ; PPC64LE-NEXT:    mtlr r0
-; PPC64LE-NEXT:    extsw r3, r3
 ; PPC64LE-NEXT:    blr
+; PPC64LE-NEXT:  .Ltmp0: # Block address taken
+; PPC64LE-NEXT:  .LBB3_2: # %return_early
+; PPC64LE-NEXT:    li r3, 0
+; PPC64LE-NEXT:    b .LBB3_1
 ;
 ; PPC64BE-LABEL: ClobberLR_BR:
 ; PPC64BE:       # %bb.0: # %entry
+; PPC64BE-NEXT:    mflr r0
+; PPC64BE-NEXT:    std r0, 16(r1)
+; PPC64BE-NEXT:    stdu r1, -48(r1)
 ; PPC64BE-NEXT:    #APP
 ; PPC64BE-NEXT:    nop
 ; PPC64BE-NEXT:    #NO_APP
-; PPC64BE-NEXT:  # %bb.1: # %return
+; PPC64BE-NEXT:  .LBB3_1: # %return
 ; PPC64BE-NEXT:    extsw r3, r3
-; PPC64BE-NEXT:    blr
-; PPC64BE-NEXT:  .Ltmp0: # Block address taken
-; PPC64BE-NEXT:  .LBB3_2: # %return_early
-; PPC64BE-NEXT:    mflr r0
-; PPC64BE-NEXT:    std r0, 16(r1)
-; PPC64BE-NEXT:    stdu r1, -48(r1)
-; PPC64BE-NEXT:    li r3, 0
 ; PPC64BE-NEXT:    addi r1, r1, 48
 ; PPC64BE-NEXT:    ld r0, 16(r1)
 ; PPC64BE-NEXT:    mtlr r0
-; PPC64BE-NEXT:    extsw r3, r3
 ; PPC64BE-NEXT:    blr
+; PPC64BE-NEXT:  .Ltmp0: # Block address taken
+; PPC64BE-NEXT:  .LBB3_2: # %return_early
+; PPC64BE-NEXT:    li r3, 0
+; PPC64BE-NEXT:    b .LBB3_1
 entry:
   callbr void asm sideeffect "nop", "X,~{lr}"(i8* blockaddress(@ClobberLR_BR, %return_early))
           to label %return [label %return_early]