diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -3638,6 +3638,12 @@
   // sections mode (`=all` and `=list=`).
   if ((MF->hasBBLabels() || MBB.isBeginSection()) && !MBB.isEntryBlock())
     return true;
+  // Retain labels for INLINEASM_BR indirect targets.
+  // TODO: should callers of
+  // MachineBasicBlock::setIsInlineAsmBrIndirectTarget(true) also set
+  // MachineBasicBlock::setLabelMustBeEmitted()?
+  if (MBB.isInlineAsmBrIndirectTarget())
+    return true;
   // A label is needed for any block with at least one predecessor (when that
   // predecessor is not the fallthrough predecessor, or if it is an EH funclet
   // entry, or if a label is forced).
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3000,7 +3000,6 @@
     BasicBlock *Dest = I.getIndirectDest(i);
     MachineBasicBlock *Target = FuncInfo.MBBMap[Dest];
     Target->setIsInlineAsmBrIndirectTarget();
-    Target->setHasAddressTaken();
     // Don't add duplicate machine successors.
     if (Dests.insert(Dest).second)
       addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -5287,14 +5287,18 @@
     case InlineAsm::isInput:
       OpInfo.CallOperandVal = Call.getArgOperand(ArgNo);
       break;
-    case InlineAsm::isLabel:
-      OpInfo.CallOperandVal =
-          cast<CallBrInst>(&Call)->getBlockAddressForIndirectDest(LabelNo);
-      OpInfo.ConstraintVT =
-          getAsmOperandValueType(DL, OpInfo.CallOperandVal->getType())
-              .getSimpleVT();
+    case InlineAsm::isLabel: {
+      OpInfo.CallOperandVal = cast<CallBrInst>(&Call)->getIndirectDest(LabelNo);
+
+      // TODO: is this type correct? The BlockAddress seems unnecessary...
+      const BlockAddress *BA =
+          cast<CallBrInst>(&Call)->getBlockAddressForIndirectDest(LabelNo);
+      Type *T = BA->getType();
+
+      OpInfo.ConstraintVT = getAsmOperandValueType(DL, T).getSimpleVT();
       ++LabelNo;
       continue;
+    }
     case InlineAsm::isClobber:
       // Nothing to do.
       break;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -55706,9 +55706,9 @@
     // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup. These can't
-    // be used as immediates. BlockAddresses are fine though.
+    // be used as immediates. BlockAddresses and BasicBlocks are fine though.
     if ((Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC()) &&
-        !isa<BlockAddressSDNode>(Op))
+        !(isa<BlockAddressSDNode>(Op) || isa<BasicBlockSDNode>(Op)))
       return;
     // If we are in non-pic codegen mode, we allow the address of a global (with
diff --git a/llvm/test/CodeGen/AArch64/callbr-asm-label.ll b/llvm/test/CodeGen/AArch64/callbr-asm-label.ll
--- a/llvm/test/CodeGen/AArch64/callbr-asm-label.ll
+++ b/llvm/test/CodeGen/AArch64/callbr-asm-label.ll
@@ -5,9 +5,9 @@
 define i32 @test1() {
 ; CHECK-LABEL: test1:
 ; CHECK: .word b
-; CHECK-NEXT: .word .Ltmp0
+; CHECK-NEXT: .word .LBB0_2
 ; CHECK: // %bb.1:
-; CHECK: .Ltmp0:
+; CHECK: .Ltmp1:
 ; CHECK: .LBB0_2: // %indirect
 entry:
   callbr void asm sideeffect "1:\0A\09.word b, ${0:l}\0A\09", "!i"()
@@ -31,8 +31,8 @@
 if.then:
 ; CHECK: .word b
-; CHECK-NEXT: .word .Ltmp2
-; CHECK: .Ltmp2:
+; CHECK-NEXT: .word .LBB1_3
+; CHECK: .Ltmp3:
 ; CHECK-NEXT: .LBB1_3: // %if.end6
   callbr void asm sideeffect "1:\0A\09.word b, ${0:l}\0A\09", "!i"()
           to label %if.then4 [label %if.end6]
diff --git a/llvm/test/CodeGen/X86/callbr-asm-bb-exports.ll b/llvm/test/CodeGen/X86/callbr-asm-bb-exports.ll
--- a/llvm/test/CodeGen/X86/callbr-asm-bb-exports.ll
+++ b/llvm/test/CodeGen/X86/callbr-asm-bb-exports.ll
@@ -6,7 +6,6 @@
 ; output from SelectionDAG.
 ; CHECK: t0: ch = EntryToken
-; CHECK-NEXT: t16: i64 = BlockAddress<@test, %fail> 0
 ; CHECK-NEXT: t4: i32,ch = CopyFromReg t0, Register:i32 %3
 ; CHECK-NEXT: t10: i32 = add t4, Constant:i32<1>
 ; CHECK-NEXT: t12: ch = CopyToReg t0, Register:i32 %0, t10
@@ -17,7 +16,7 @@
 ; CHECK-NEXT: t2: i32,ch = CopyFromReg t0, Register:i32 %2
 ; CHECK-NEXT: t8: i32 = add t2, Constant:i32<4>
 ; CHECK-NEXT: t22: ch,glue = CopyToReg t17, Register:i32 %5, t8
-; CHECK-NEXT: t30: ch,glue = inlineasm_br t22, TargetExternalSymbol:i64'xorl $0, $0; jmp ${1:l}', MDNode:ch, TargetConstant:i64<0>, TargetConstant:i32<2359305>, Register:i32 %5, TargetConstant:i64<13>, TargetBlockAddress:i64<@test, %fail> 0, TargetConstant:i32<12>, Register:i32 $df, TargetConstant:i32<12>, Register:i16 $fpsw, TargetConstant:i32<12>, Register:i32 $eflags, t22:1
+; CHECK-NEXT: t29: ch,glue = inlineasm_br t22, TargetExternalSymbol:i64'xorl $0, $0; jmp ${1:l}', MDNode:ch, TargetConstant:i64<0>, TargetConstant:i32<2359305>, Register:i32 %5, TargetConstant:i64<13>, BasicBlock:ch, TargetConstant:i32<12>, Register:i32 $df, TargetConstant:i32<12>, Register:i16 $fpsw, TargetConstant:i32<12>, Register:i32 $eflags, t22:1
 define i32 @test(i32 %a, i32 %b, i32 %c) {
 entry:
diff --git a/llvm/test/CodeGen/X86/callbr-asm-branch-folding.ll b/llvm/test/CodeGen/X86/callbr-asm-branch-folding.ll
--- a/llvm/test/CodeGen/X86/callbr-asm-branch-folding.ll
+++ b/llvm/test/CodeGen/X86/callbr-asm-branch-folding.ll
@@ -56,7 +56,7 @@
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: jmp .LBB0_9
 ; CHECK-NEXT: .Ltmp0: # Block address taken
-; CHECK-NEXT: # %bb.7: # %if.then20.critedge
+; CHECK-NEXT: .LBB0_7: # %if.then20.critedge
 ; CHECK-NEXT: movl j(%rip), %edi
 ; CHECK-NEXT: movslq %eax, %rcx
 ; CHECK-NEXT: movl $1, %esi
diff --git a/llvm/test/CodeGen/X86/callbr-asm-destinations.ll b/llvm/test/CodeGen/X86/callbr-asm-destinations.ll
--- a/llvm/test/CodeGen/X86/callbr-asm-destinations.ll
+++ b/llvm/test/CodeGen/X86/callbr-asm-destinations.ll
@@ -8,10 +8,10 @@
 ; CHECK-NEXT: addl $4, %eax
 ; CHECK-NEXT: #APP
 ; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: jmp .Ltmp0
+; CHECK-NEXT: jmp .LBB0_1
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: .Ltmp0: # Block address taken
-; CHECK-NEXT: # %bb.1: # %fail
+; CHECK-NEXT: .LBB0_1: # %fail
 ; CHECK-NEXT: movl $1, %eax
 ; CHECK-NEXT: retl
 entry:
@@ -29,8 +29,8 @@
 ; CHECK-NEXT: addl $4, %eax
 ; CHECK-NEXT: #APP
 ; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: jmp .Ltmp1
-; CHECK-NEXT: jmp .Ltmp1
+; CHECK-NEXT: jmp .LBB1_2
+; CHECK-NEXT: jmp .LBB1_2
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: # %bb.1: # %normal
 ; CHECK-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/callbr-asm-instr-scheduling.ll b/llvm/test/CodeGen/X86/callbr-asm-instr-scheduling.ll
--- a/llvm/test/CodeGen/X86/callbr-asm-instr-scheduling.ll
+++ b/llvm/test/CodeGen/X86/callbr-asm-instr-scheduling.ll
@@ -33,11 +33,11 @@
 ; CHECK-NEXT: andl $511, %eax # imm = 0x1FF
 ; CHECK-NEXT: leaq (%rdx,%rax,8), %rax
 ; CHECK-NEXT: #APP
+; CHECK-NEXT: .Ltmp0:
+; CHECK-NEXT: jmp .Ltmp1
 ; CHECK-NEXT: .Ltmp2:
-; CHECK-NEXT: jmp .Ltmp3
-; CHECK-NEXT: .Ltmp4:
-; CHECK-NEXT: .zero (-(((.Ltmp5-.Ltmp6)-(.Ltmp4-.Ltmp2))>0))*((.Ltmp5-.Ltmp6)-(.Ltmp4-.Ltmp2)),144
-; CHECK-NEXT: .Ltmp7:
+; CHECK-NEXT: .zero (-(((.Ltmp3-.Ltmp4)-(.Ltmp2-.Ltmp0))>0))*((.Ltmp3-.Ltmp4)-(.Ltmp2-.Ltmp0)),144
+; CHECK-NEXT: .Ltmp5:
 entry:
   %0 = tail call i64 asm sideeffect "mov %cr3,$0\0A\09", "=r,=*m,~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) nonnull @__force_order)
   %and.i = and i64 %0, 9223372036854771712
diff --git a/llvm/test/CodeGen/X86/callbr-asm-label-addr.ll b/llvm/test/CodeGen/X86/callbr-asm-label-addr.ll
--- a/llvm/test/CodeGen/X86/callbr-asm-label-addr.ll
+++ b/llvm/test/CodeGen/X86/callbr-asm-label-addr.ll
@@ -3,9 +3,9 @@
 define i32 @test1(i32 %x) {
 ; CHECK-LABEL: test1:
 ; CHECK: .quad .Ltmp0
-; CHECK-NEXT: .quad .Ltmp1
+; CHECK-NEXT: .quad .LBB0_1
 ; CHECK: .Ltmp1:
-; CHECK-NEXT: # %bb.1: # %bar
+; CHECK-NEXT: .LBB0_1: # %bar
 ; CHECK-NEXT: callq foo
 ; CHECK-NEXT: .Ltmp0:
 ; CHECK-NEXT: # %bb.2: # %baz
diff --git a/llvm/test/CodeGen/X86/callbr-asm-outputs.ll b/llvm/test/CodeGen/X86/callbr-asm-outputs.ll
--- a/llvm/test/CodeGen/X86/callbr-asm-outputs.ll
+++ b/llvm/test/CodeGen/X86/callbr-asm-outputs.ll
@@ -10,7 +10,7 @@
 ; CHECK-NEXT: addl $4, %eax
 ; CHECK-NEXT: #APP
 ; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: jmp .Ltmp0
+; CHECK-NEXT: jmp .LBB0_2
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: # %bb.1: # %normal
 ; CHECK-NEXT: retl
@@ -48,26 +48,26 @@
 ; CHECK-NEXT: #APP
 ; CHECK-NEXT: testl %esi, %esi
 ; CHECK-NEXT: testl %edi, %esi
-; CHECK-NEXT: jne .Ltmp1
+; CHECK-NEXT: jne .LBB1_4
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: jmp .LBB1_3
 ; CHECK-NEXT: .LBB1_2: # %if.else
 ; CHECK-NEXT: #APP
 ; CHECK-NEXT: testl %esi, %edi
 ; CHECK-NEXT: testl %esi, %edi
-; CHECK-NEXT: jne .Ltmp2
+; CHECK-NEXT: jne .LBB1_5
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: .LBB1_3:
 ; CHECK-NEXT: movl %esi, %eax
 ; CHECK-NEXT: addl %edi, %eax
-; CHECK-NEXT: .Ltmp2: # Block address taken
+; CHECK-NEXT: .Ltmp1: # Block address taken
 ; CHECK-NEXT: .LBB1_5: # %return
 ; CHECK-NEXT: popl %esi
 ; CHECK-NEXT: .cfi_def_cfa_offset 8
 ; CHECK-NEXT: popl %edi
 ; CHECK-NEXT: .cfi_def_cfa_offset 4
 ; CHECK-NEXT: retl
-; CHECK-NEXT: .Ltmp1: # Block address taken
+; CHECK-NEXT: .Ltmp2: # Block address taken
 ; CHECK-NEXT: .LBB1_4: # %label_true
 ; CHECK-NEXT: .cfi_def_cfa_offset 12
 ; CHECK-NEXT: movl $-2, %eax
@@ -163,13 +163,13 @@
 ; CHECK-NEXT: #APP
 ; CHECK-NEXT: testl %ecx, %ecx
 ; CHECK-NEXT: testl %edx, %ecx
-; CHECK-NEXT: jne .Ltmp4
+; CHECK-NEXT: jne .LBB3_3
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: # %bb.1: # %asm.fallthrough
 ; CHECK-NEXT: #APP
 ; CHECK-NEXT: testl %ecx, %edx
 ; CHECK-NEXT: testl %ecx, %edx
-; CHECK-NEXT: jne .Ltmp5
+; CHECK-NEXT: jne .LBB3_4
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: # %bb.2: # %asm.fallthrough2
 ; CHECK-NEXT: addl %edx, %ecx
@@ -215,7 +215,7 @@
 ; CHECK-NEXT: #APP
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: .Ltmp6: # Block address taken
-; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: .LBB4_1:
 ; CHECK-NEXT: retl
   %1 = call i32 @llvm.read_register.i32(metadata !3)
   %2 = callbr i32 asm "", "={esp},!i,{esp},~{dirflag},~{fpsr},~{flags}"(i32 %1)
diff --git a/llvm/test/CodeGen/X86/callbr-asm-sink.ll b/llvm/test/CodeGen/X86/callbr-asm-sink.ll
--- a/llvm/test/CodeGen/X86/callbr-asm-sink.ll
+++ b/llvm/test/CodeGen/X86/callbr-asm-sink.ll
@@ -12,7 +12,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: leaq 8(%rdi), %rax
 ; CHECK-NEXT: #APP
-; CHECK-NEXT: # 8(%rdi) .Ltmp0
+; CHECK-NEXT: # 8(%rdi) .LBB0_1
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: # %bb.2:
 ; CHECK-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/callbr-asm.ll b/llvm/test/CodeGen/X86/callbr-asm.ll
--- a/llvm/test/CodeGen/X86/callbr-asm.ll
+++ b/llvm/test/CodeGen/X86/callbr-asm.ll
@@ -12,7 +12,7 @@
 ; CHECK-NEXT: addl $4, %eax
 ; CHECK-NEXT: #APP
 ; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: jmp .Ltmp0
+; CHECK-NEXT: jmp .LBB0_2
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: # %bb.1: # %normal
 ; CHECK-NEXT: xorl %eax, %eax
@@ -41,7 +41,7 @@
 ; CHECK-NEXT: #APP
 ; CHECK-EMPTY:
 ; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: jmp .Ltmp1
+; CHECK-NEXT: jmp .LBB1_2
 ; CHECK-EMPTY:
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: # %bb.1: # %normal
@@ -115,17 +115,17 @@
 ; CHECK-NEXT: # Parent Loop BB3_3 Depth=3
 ; CHECK-NEXT: # => This Inner Loop Header: Depth=4
 ; CHECK-NEXT: #APP
-; CHECK-NEXT: jmp .Ltmp2
-; CHECK-NEXT: jmp .Ltmp3
-; CHECK-NEXT: jmp .Ltmp4
+; CHECK-NEXT: jmp .LBB3_1
+; CHECK-NEXT: jmp .LBB3_2
+; CHECK-NEXT: jmp .LBB3_3
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: # %bb.5: # %normal0
 ; CHECK-NEXT: # in Loop: Header=BB3_4 Depth=4
 ; CHECK-NEXT: #APP
-; CHECK-NEXT: jmp .Ltmp2
-; CHECK-NEXT: jmp .Ltmp3
-; CHECK-NEXT: jmp .Ltmp4
-; CHECK-NEXT: jmp .Ltmp5
+; CHECK-NEXT: jmp .LBB3_1
+; CHECK-NEXT: jmp .LBB3_2
+; CHECK-NEXT: jmp .LBB3_3
+; CHECK-NEXT: jmp .LBB3_4
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: # %bb.6: # %normal1
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -165,11 +165,11 @@
 ; CHECK-LABEL: test4:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: #APP
-; CHECK-NEXT: ja .Ltmp6
+; CHECK-NEXT: ja .LBB4_3
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: # %bb.1: # %asm.fallthrough
 ; CHECK-NEXT: #APP
-; CHECK-NEXT: ja .Ltmp6
+; CHECK-NEXT: ja .LBB4_3
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: .Ltmp6: # Block address taken
 ; CHECK-NEXT: .LBB4_3: # %quux
diff --git a/llvm/test/CodeGen/X86/inline-asm-pic.ll b/llvm/test/CodeGen/X86/inline-asm-pic.ll
--- a/llvm/test/CodeGen/X86/inline-asm-pic.ll
+++ b/llvm/test/CodeGen/X86/inline-asm-pic.ll
@@ -26,7 +26,7 @@
 ; CHECK-LABEL: x:
 ; CHECK: ## %bb.0:
 ; CHECK-NEXT: ## InlineAsm Start
-; CHECK-NEXT: ## Ltmp0
+; CHECK-NEXT: ## LBB1_1
 ; CHECK-EMPTY:
 ; CHECK-NEXT: ## InlineAsm End
 ; CHECK-NEXT: ## %bb.2: ## %return
diff --git a/llvm/test/CodeGen/X86/shrinkwrap-callbr.ll b/llvm/test/CodeGen/X86/shrinkwrap-callbr.ll
--- a/llvm/test/CodeGen/X86/shrinkwrap-callbr.ll
+++ b/llvm/test/CodeGen/X86/shrinkwrap-callbr.ll
@@ -20,7 +20,7 @@
 ; CHECK-NEXT: # %bb.1: # %if.end
 ; CHECK-NEXT: callq fn
 ; CHECK-NEXT: #APP
-; CHECK-NEXT: # jump to .Ltmp0
+; CHECK-NEXT: # jump to .LBB0_4
 ; CHECK-NEXT: #NO_APP
 ; CHECK-NEXT: # %bb.2: # %return
 ; CHECK-NEXT: movl $4, %eax
diff --git a/llvm/test/CodeGen/X86/tail-dup-asm-goto.ll b/llvm/test/CodeGen/X86/tail-dup-asm-goto.ll
--- a/llvm/test/CodeGen/X86/tail-dup-asm-goto.ll
+++ b/llvm/test/CodeGen/X86/tail-dup-asm-goto.ll
@@ -35,7 +35,7 @@
   ; CHECK-NEXT: successors: %bb.5(0x80000000), %bb.4(0x00000000)
   ; CHECK-NEXT: {{ $}}
   ; CHECK-NEXT: [[PHI:%[0-9]+]]:gr64 = PHI [[COPY]], %bb.2, [[MOV64rm]], %bb.1
-  ; CHECK-NEXT: INLINEASM_BR &"#$0 $1 $2", 9 /* sideeffect mayload attdialect */, 13 /* imm */, 42, 13 /* imm */, 0, 13 /* imm */, blockaddress(@test1, %ir-block.bb17.i.i.i), 12 /* clobber */, implicit-def early-clobber $df, 12 /* clobber */, implicit-def early-clobber $fpsw, 12 /* clobber */, implicit-def early-clobber $eflags
+  ; CHECK-NEXT: INLINEASM_BR &"#$0 $1 $2", 9 /* sideeffect mayload attdialect */, 13 /* imm */, 42, 13 /* imm */, 0, 13 /* imm */, %bb.4, 12 /* clobber */, implicit-def early-clobber $df, 12 /* clobber */, implicit-def early-clobber $fpsw, 12 /* clobber */, implicit-def early-clobber $eflags
   ; CHECK-NEXT: JMP_1 %bb.5
   ; CHECK-NEXT: {{ $}}
   ; CHECK-NEXT: bb.4.bb17.i.i.i (address-taken, inlineasm-br-indirect-target):
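
Note (illustrative only, not part of the patch): a minimal callbr in the style of the tests above, where the ${0:l} operand names the indirect destination. With this change the label printed for ${0:l} is the block's own .LBB label, kept alive by the AsmPrinter change, rather than a separate .Ltmp blockaddress label. The function and block names below are made up for the example.

define i32 @example(i32 %x) {
entry:
  ; one fallthrough successor, one indirect successor referenced via !i / ${0:l}
  callbr void asm sideeffect "# jump to ${0:l}", "!i,~{dirflag},~{fpsr},~{flags}"()
          to label %fallthrough [label %indirect]

fallthrough:                                      ; ordinary successor
  ret i32 0

indirect:                                         ; inlineasm-br-indirect-target; its .LBB label must be emitted
  ret i32 1
}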