Index: llvm/lib/Target/X86/X86.h =================================================================== --- llvm/lib/Target/X86/X86.h +++ llvm/lib/Target/X86/X86.h @@ -120,7 +120,7 @@ FunctionPass *createX86EvexToVexInsts(); /// This pass creates the thunks for the retpoline feature. -FunctionPass *createX86RetpolineThunksPass(); +ModulePass *createX86RetpolineThunksPass(); /// This pass ensures instructions featuring a memory operand /// have distinctive (with respect to eachother) Index: llvm/lib/Target/X86/X86RetpolineThunks.cpp =================================================================== --- llvm/lib/Target/X86/X86RetpolineThunks.cpp +++ llvm/lib/Target/X86/X86RetpolineThunks.cpp @@ -45,67 +45,51 @@ static const char EDIThunkName[] = "__llvm_retpoline_edi"; namespace { -class X86RetpolineThunks : public MachineFunctionPass { +class X86RetpolineThunks : public ModulePass { public: static char ID; - X86RetpolineThunks() : MachineFunctionPass(ID) {} + X86RetpolineThunks() : ModulePass(ID) {} StringRef getPassName() const override { return "X86 Retpoline Thunks"; } - bool doInitialization(Module &M) override; - bool runOnMachineFunction(MachineFunction &F) override; + bool runOnModule(Module &M) override; void getAnalysisUsage(AnalysisUsage &AU) const override { - MachineFunctionPass::getAnalysisUsage(AU); + ModulePass::getAnalysisUsage(AU); AU.addRequired<MachineModuleInfoWrapperPass>(); AU.addPreserved<MachineModuleInfoWrapperPass>(); } private: - MachineModuleInfo *MMI = nullptr; - const TargetMachine *TM = nullptr; - bool Is64Bit = false; - const X86Subtarget *STI = nullptr; - const X86InstrInfo *TII = nullptr; - - bool InsertedThunks = false; - - void createThunkFunction(Module &M, StringRef Name); - void insertRegReturnAddrClobber(MachineBasicBlock &MBB, unsigned Reg); - void populateThunk(MachineFunction &MF, unsigned Reg); + void createThunkFunction(Module &M, MachineModuleInfo &MMI, + const TargetInstrInfo *TII, StringRef Name, + unsigned Reg, bool Is64Bit); + void insertRegReturnAddrClobber(const 
TargetInstrInfo *TII, + MachineBasicBlock &MBB, unsigned Reg, + bool Is64Bit); + void populateThunk(const TargetInstrInfo *TII, MachineFunction &MF, + unsigned Reg, bool Is64Bit); }; } // end anonymous namespace -FunctionPass *llvm::createX86RetpolineThunksPass() { +ModulePass *llvm::createX86RetpolineThunksPass() { return new X86RetpolineThunks(); } char X86RetpolineThunks::ID = 0; -bool X86RetpolineThunks::doInitialization(Module &M) { - InsertedThunks = false; - return false; -} - -bool X86RetpolineThunks::runOnMachineFunction(MachineFunction &MF) { +bool X86RetpolineThunks::runOnModule(Module &M) { LLVM_DEBUG(dbgs() << getPassName() << '\n'); - TM = &MF.getTarget();; - STI = &MF.getSubtarget<X86Subtarget>(); - TII = STI->getInstrInfo(); - Is64Bit = TM->getTargetTriple().getArch() == Triple::x86_64; + MachineModuleInfo &MMI = getAnalysis<MachineModuleInfoWrapperPass>().getMMI(); + const TargetMachine &TM = MMI.getTarget(); + bool Is64Bit = TM.getTargetTriple().getArch() == Triple::x86_64; - MMI = &getAnalysis<MachineModuleInfoWrapperPass>().getMMI(); - Module &M = const_cast<Module &>(*MMI->getModule()); - - // If this function is not a thunk, check to see if we need to insert - // a thunk. - if (!MF.getName().startswith(ThunkNamePrefix)) { - // If we've already inserted a thunk, nothing else to do. - if (InsertedThunks) - return false; + for (auto &F : M) { + const X86Subtarget &STI = TM.getSubtarget<X86Subtarget>(F); + const TargetInstrInfo *TII = STI.getInstrInfo(); // Only add a thunk if one of the functions has the retpoline feature // enabled in its subtarget, and doesn't enable external thunks. @@ -114,85 +98,71 @@ // FIXME: It's a little silly to look at every function just to enumerate // the subtargets, but eventually we'll want to look at them for indirect // calls, so maybe this is OK. 
- if ((!STI->useRetpolineIndirectCalls() && - !STI->useRetpolineIndirectBranches()) || - STI->useRetpolineExternalThunk()) - return false; + if ((!STI.useRetpolineIndirectCalls() && + !STI.useRetpolineIndirectBranches()) || + STI.useRetpolineExternalThunk()) + continue; + + LLVM_DEBUG(dbgs() << F.getName() << " requires retpoline\n"); - // Otherwise, we need to insert the thunk. // WARNING: This is not really a well behaving thing to do in a function // pass. We extract the module and insert a new function (and machine // function) directly into the module. - if (Is64Bit) - createThunkFunction(M, R11ThunkName); - else - for (StringRef Name : - {EAXThunkName, ECXThunkName, EDXThunkName, EDIThunkName}) - createThunkFunction(M, Name); - InsertedThunks = true; + if (Is64Bit) { + // __llvm_retpoline_r11: + // callq .Lr11_call_target + // .Lr11_capture_spec: + // pause + // lfence + // jmp .Lr11_capture_spec + // .align 16 + // .Lr11_call_target: + // movq %r11, (%rsp) + // retq + createThunkFunction(M, MMI, TII, R11ThunkName, X86::R11, true); + } else { + // For 32-bit targets we need to emit a collection of thunks for various + // possible scratch registers as well as a fallback that uses EDI, which + // is normally callee saved. + // __llvm_retpoline_eax: + // calll .Leax_call_target + // .Leax_capture_spec: + // pause + // jmp .Leax_capture_spec + // .align 16 + // .Leax_call_target: + // movl %eax, (%esp) # Clobber return addr + // retl + // + // __llvm_retpoline_ecx: + // ... # Same setup + // movl %ecx, (%esp) + // retl + // + // __llvm_retpoline_edx: + // ... # Same setup + // movl %edx, (%esp) + // retl + // + // __llvm_retpoline_edi: + // ... 
# Same setup + // movl %edi, (%esp) + // retl + createThunkFunction(M, MMI, TII, EAXThunkName, X86::EAX, false); + createThunkFunction(M, MMI, TII, ECXThunkName, X86::ECX, false); + createThunkFunction(M, MMI, TII, EDXThunkName, X86::EDX, false); + createThunkFunction(M, MMI, TII, EDIThunkName, X86::EDI, false); + } return true; } - // If this *is* a thunk function, we need to populate it with the correct MI. - if (Is64Bit) { - assert(MF.getName() == "__llvm_retpoline_r11" && - "Should only have an r11 thunk on 64-bit targets"); - - // __llvm_retpoline_r11: - // callq .Lr11_call_target - // .Lr11_capture_spec: - // pause - // lfence - // jmp .Lr11_capture_spec - // .align 16 - // .Lr11_call_target: - // movq %r11, (%rsp) - // retq - populateThunk(MF, X86::R11); - } else { - // For 32-bit targets we need to emit a collection of thunks for various - // possible scratch registers as well as a fallback that uses EDI, which is - // normally callee saved. - // __llvm_retpoline_eax: - // calll .Leax_call_target - // .Leax_capture_spec: - // pause - // jmp .Leax_capture_spec - // .align 16 - // .Leax_call_target: - // movl %eax, (%esp) # Clobber return addr - // retl - // - // __llvm_retpoline_ecx: - // ... # Same setup - // movl %ecx, (%esp) - // retl - // - // __llvm_retpoline_edx: - // ... # Same setup - // movl %edx, (%esp) - // retl - // - // __llvm_retpoline_edi: - // ... 
# Same setup - // movl %edi, (%esp) - // retl - if (MF.getName() == EAXThunkName) - populateThunk(MF, X86::EAX); - else if (MF.getName() == ECXThunkName) - populateThunk(MF, X86::ECX); - else if (MF.getName() == EDXThunkName) - populateThunk(MF, X86::EDX); - else if (MF.getName() == EDIThunkName) - populateThunk(MF, X86::EDI); - else - llvm_unreachable("Invalid thunk name on x86-32!"); - } - - return true; + return false; } -void X86RetpolineThunks::createThunkFunction(Module &M, StringRef Name) { +void X86RetpolineThunks::createThunkFunction(Module &M, MachineModuleInfo &MMI, + const TargetInstrInfo *TII, + StringRef Name, unsigned Reg, + bool Is64Bit) { assert(Name.startswith(ThunkNamePrefix) && "Created a thunk with an unexpected prefix!"); @@ -219,25 +189,30 @@ // MachineFunctions/MachineBasicBlocks aren't created automatically for the // IR-level constructs we already made. Create them and insert them into the // module. - MachineFunction &MF = MMI->getOrCreateMachineFunction(*F); + MachineFunction &MF = MMI.getOrCreateMachineFunction(*F); MachineBasicBlock *EntryMBB = MF.CreateMachineBasicBlock(Entry); // Insert EntryMBB into MF. It's not in the module until we do this. MF.insert(MF.end(), EntryMBB); + populateThunk(TII, MF, Reg, Is64Bit); } -void X86RetpolineThunks::insertRegReturnAddrClobber(MachineBasicBlock &MBB, - unsigned Reg) { +void X86RetpolineThunks::insertRegReturnAddrClobber(const TargetInstrInfo *TII, + MachineBasicBlock &MBB, + unsigned Reg, + bool Is64Bit) { const unsigned MovOpc = Is64Bit ? X86::MOV64mr : X86::MOV32mr; const unsigned SPReg = Is64Bit ? X86::RSP : X86::ESP; addRegOffset(BuildMI(&MBB, DebugLoc(), TII->get(MovOpc)), SPReg, false, 0) .addReg(Reg); } -void X86RetpolineThunks::populateThunk(MachineFunction &MF, - unsigned Reg) { - // Set MF properties. We never use vregs... +void X86RetpolineThunks::populateThunk(const TargetInstrInfo *TII, + MachineFunction &MF, unsigned Reg, + bool Is64Bit) { + // Set MF properties. 
We never use vregs or phis... MF.getProperties().set(MachineFunctionProperties::Property::NoVRegs); + MF.getProperties().set(MachineFunctionProperties::Property::NoPHIs); // Grab the entry MBB and erase any other blocks. O0 codegen appears to // generate two bbs for the entry block. @@ -246,8 +221,10 @@ while (MF.size() > 1) MF.erase(std::next(MF.begin())); - MachineBasicBlock *CaptureSpec = MF.CreateMachineBasicBlock(Entry->getBasicBlock()); - MachineBasicBlock *CallTarget = MF.CreateMachineBasicBlock(Entry->getBasicBlock()); + MachineBasicBlock *CaptureSpec = + MF.CreateMachineBasicBlock(Entry->getBasicBlock()); + MachineBasicBlock *CallTarget = + MF.CreateMachineBasicBlock(Entry->getBasicBlock()); MCSymbol *TargetSym = MF.getContext().createTempSymbol(); MF.push_back(CaptureSpec); MF.push_back(CallTarget); @@ -280,7 +257,7 @@ CallTarget->addLiveIn(Reg); CallTarget->setHasAddressTaken(); CallTarget->setAlignment(Align(16)); - insertRegReturnAddrClobber(*CallTarget, Reg); + insertRegReturnAddrClobber(TII, *CallTarget, Reg, Is64Bit); CallTarget->back().setPreInstrSymbol(MF, TargetSym); BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc)); } Index: llvm/test/CodeGen/X86/O0-pipeline.ll =================================================================== --- llvm/test/CodeGen/X86/O0-pipeline.ll +++ llvm/test/CodeGen/X86/O0-pipeline.ll @@ -72,7 +72,8 @@ ; CHECK-NEXT: Contiguously Lay Out Funclets ; CHECK-NEXT: StackMap Liveness Analysis ; CHECK-NEXT: Live DEBUG_VALUE analysis -; CHECK-NEXT: X86 Retpoline Thunks +; CHECK-NEXT: X86 Retpoline Thunks +; CHECK-NEXT: FunctionPass Manager ; CHECK-NEXT: Check CFA info and insert CFI instructions if needed ; CHECK-NEXT: Lazy Machine Block Frequency Analysis ; CHECK-NEXT: Machine Optimization Remark Emitter Index: llvm/test/CodeGen/X86/O3-pipeline.ll =================================================================== --- llvm/test/CodeGen/X86/O3-pipeline.ll +++ llvm/test/CodeGen/X86/O3-pipeline.ll @@ -181,7 +181,8 @@ ; CHECK-NEXT: 
Contiguously Lay Out Funclets ; CHECK-NEXT: StackMap Liveness Analysis ; CHECK-NEXT: Live DEBUG_VALUE analysis -; CHECK-NEXT: X86 Retpoline Thunks +; CHECK-NEXT: X86 Retpoline Thunks +; CHECK-NEXT: FunctionPass Manager ; CHECK-NEXT: Check CFA info and insert CFI instructions if needed ; CHECK-NEXT: Lazy Machine Block Frequency Analysis ; CHECK-NEXT: Machine Optimization Remark Emitter Index: llvm/test/CodeGen/X86/speculative-load-hardening-indirect.ll =================================================================== --- llvm/test/CodeGen/X86/speculative-load-hardening-indirect.ll +++ llvm/test/CodeGen/X86/speculative-load-hardening-indirect.ll @@ -449,38 +449,38 @@ ; X64-RETPOLINE-NEXT: movq global_blockaddrs(,%rdx,8), %rdx ; X64-RETPOLINE-NEXT: orq %rcx, %rdx ; X64-RETPOLINE-NEXT: cmpq $2, %rdx -; X64-RETPOLINE-NEXT: je .LBB6_4 +; X64-RETPOLINE-NEXT: je .LBB5_4 ; X64-RETPOLINE-NEXT: # %bb.1: # %entry ; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx ; X64-RETPOLINE-NEXT: cmpq $3, %rdx -; X64-RETPOLINE-NEXT: je .LBB6_5 +; X64-RETPOLINE-NEXT: je .LBB5_5 ; X64-RETPOLINE-NEXT: # %bb.2: # %entry ; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx ; X64-RETPOLINE-NEXT: cmpq $4, %rdx -; X64-RETPOLINE-NEXT: jne .LBB6_3 -; X64-RETPOLINE-NEXT: .Ltmp0: # Block address taken +; X64-RETPOLINE-NEXT: jne .LBB5_3 +; X64-RETPOLINE-NEXT: .Ltmp1: # Block address taken ; X64-RETPOLINE-NEXT: # %bb.6: # %bb3 ; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx ; X64-RETPOLINE-NEXT: shlq $47, %rcx ; X64-RETPOLINE-NEXT: movl $42, %eax ; X64-RETPOLINE-NEXT: orq %rcx, %rsp ; X64-RETPOLINE-NEXT: retq -; X64-RETPOLINE-NEXT: .Ltmp1: # Block address taken -; X64-RETPOLINE-NEXT: .LBB6_4: # %bb1 +; X64-RETPOLINE-NEXT: .Ltmp2: # Block address taken +; X64-RETPOLINE-NEXT: .LBB5_4: # %bb1 ; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx ; X64-RETPOLINE-NEXT: shlq $47, %rcx ; X64-RETPOLINE-NEXT: movl $7, %eax ; X64-RETPOLINE-NEXT: orq %rcx, %rsp ; X64-RETPOLINE-NEXT: retq -; X64-RETPOLINE-NEXT: .Ltmp2: # Block address taken -; 
X64-RETPOLINE-NEXT: .LBB6_5: # %bb2 +; X64-RETPOLINE-NEXT: .Ltmp3: # Block address taken +; X64-RETPOLINE-NEXT: .LBB5_5: # %bb2 ; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx ; X64-RETPOLINE-NEXT: shlq $47, %rcx ; X64-RETPOLINE-NEXT: movl $13, %eax ; X64-RETPOLINE-NEXT: orq %rcx, %rsp ; X64-RETPOLINE-NEXT: retq -; X64-RETPOLINE-NEXT: .Ltmp3: # Block address taken -; X64-RETPOLINE-NEXT: .LBB6_3: # %bb0 +; X64-RETPOLINE-NEXT: .Ltmp4: # Block address taken +; X64-RETPOLINE-NEXT: .LBB5_3: # %bb0 ; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx ; X64-RETPOLINE-NEXT: shlq $47, %rcx ; X64-RETPOLINE-NEXT: movl $2, %eax @@ -623,48 +623,48 @@ ; X64-RETPOLINE-NEXT: movq $-1, %rax ; X64-RETPOLINE-NEXT: sarq $63, %rcx ; X64-RETPOLINE-NEXT: cmpl $1, %edi -; X64-RETPOLINE-NEXT: jg .LBB7_4 +; X64-RETPOLINE-NEXT: jg .LBB6_4 ; X64-RETPOLINE-NEXT: # %bb.1: # %entry ; X64-RETPOLINE-NEXT: cmovgq %rax, %rcx ; X64-RETPOLINE-NEXT: testl %edi, %edi -; X64-RETPOLINE-NEXT: je .LBB7_7 +; X64-RETPOLINE-NEXT: je .LBB6_7 ; X64-RETPOLINE-NEXT: # %bb.2: # %entry ; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx ; X64-RETPOLINE-NEXT: cmpl $1, %edi -; X64-RETPOLINE-NEXT: jne .LBB7_6 +; X64-RETPOLINE-NEXT: jne .LBB6_6 ; X64-RETPOLINE-NEXT: # %bb.3: # %bb2 ; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx ; X64-RETPOLINE-NEXT: shlq $47, %rcx ; X64-RETPOLINE-NEXT: movl $13, %eax ; X64-RETPOLINE-NEXT: orq %rcx, %rsp ; X64-RETPOLINE-NEXT: retq -; X64-RETPOLINE-NEXT: .LBB7_4: # %entry +; X64-RETPOLINE-NEXT: .LBB6_4: # %entry ; X64-RETPOLINE-NEXT: cmovleq %rax, %rcx ; X64-RETPOLINE-NEXT: cmpl $2, %edi -; X64-RETPOLINE-NEXT: je .LBB7_8 +; X64-RETPOLINE-NEXT: je .LBB6_8 ; X64-RETPOLINE-NEXT: # %bb.5: # %entry ; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx ; X64-RETPOLINE-NEXT: cmpl $3, %edi -; X64-RETPOLINE-NEXT: jne .LBB7_6 +; X64-RETPOLINE-NEXT: jne .LBB6_6 ; X64-RETPOLINE-NEXT: # %bb.9: # %bb5 ; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx ; X64-RETPOLINE-NEXT: shlq $47, %rcx ; X64-RETPOLINE-NEXT: movl $11, %eax ; X64-RETPOLINE-NEXT: orq %rcx, %rsp ; 
X64-RETPOLINE-NEXT: retq -; X64-RETPOLINE-NEXT: .LBB7_6: +; X64-RETPOLINE-NEXT: .LBB6_6: ; X64-RETPOLINE-NEXT: cmoveq %rax, %rcx ; X64-RETPOLINE-NEXT: shlq $47, %rcx ; X64-RETPOLINE-NEXT: movl $2, %eax ; X64-RETPOLINE-NEXT: orq %rcx, %rsp ; X64-RETPOLINE-NEXT: retq -; X64-RETPOLINE-NEXT: .LBB7_7: # %bb1 +; X64-RETPOLINE-NEXT: .LBB6_7: # %bb1 ; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx ; X64-RETPOLINE-NEXT: shlq $47, %rcx ; X64-RETPOLINE-NEXT: movl $7, %eax ; X64-RETPOLINE-NEXT: orq %rcx, %rsp ; X64-RETPOLINE-NEXT: retq -; X64-RETPOLINE-NEXT: .LBB7_8: # %bb3 +; X64-RETPOLINE-NEXT: .LBB6_8: # %bb3 ; X64-RETPOLINE-NEXT: cmovneq %rax, %rcx ; X64-RETPOLINE-NEXT: shlq $47, %rcx ; X64-RETPOLINE-NEXT: movl $42, %eax @@ -808,49 +808,49 @@ ; X64-RETPOLINE-NEXT: sarq $63, %r9 ; X64-RETPOLINE-NEXT: xorl %eax, %eax ; X64-RETPOLINE-NEXT: cmpl $1, %edi -; X64-RETPOLINE-NEXT: jg .LBB8_5 +; X64-RETPOLINE-NEXT: jg .LBB7_5 ; X64-RETPOLINE-NEXT: # %bb.1: # %entry ; X64-RETPOLINE-NEXT: cmovgq %r10, %r9 ; X64-RETPOLINE-NEXT: testl %edi, %edi -; X64-RETPOLINE-NEXT: je .LBB8_2 +; X64-RETPOLINE-NEXT: je .LBB7_2 ; X64-RETPOLINE-NEXT: # %bb.3: # %entry ; X64-RETPOLINE-NEXT: cmoveq %r10, %r9 ; X64-RETPOLINE-NEXT: cmpl $1, %edi -; X64-RETPOLINE-NEXT: jne .LBB8_8 +; X64-RETPOLINE-NEXT: jne .LBB7_8 ; X64-RETPOLINE-NEXT: # %bb.4: ; X64-RETPOLINE-NEXT: cmovneq %r10, %r9 -; X64-RETPOLINE-NEXT: jmp .LBB8_10 -; X64-RETPOLINE-NEXT: .LBB8_5: # %entry +; X64-RETPOLINE-NEXT: jmp .LBB7_10 +; X64-RETPOLINE-NEXT: .LBB7_5: # %entry ; X64-RETPOLINE-NEXT: cmovleq %r10, %r9 ; X64-RETPOLINE-NEXT: cmpl $2, %edi -; X64-RETPOLINE-NEXT: je .LBB8_6 +; X64-RETPOLINE-NEXT: je .LBB7_6 ; X64-RETPOLINE-NEXT: # %bb.7: # %entry ; X64-RETPOLINE-NEXT: cmoveq %r10, %r9 ; X64-RETPOLINE-NEXT: cmpl $3, %edi -; X64-RETPOLINE-NEXT: jne .LBB8_8 +; X64-RETPOLINE-NEXT: jne .LBB7_8 ; X64-RETPOLINE-NEXT: # %bb.13: ; X64-RETPOLINE-NEXT: cmovneq %r10, %r9 -; X64-RETPOLINE-NEXT: jmp .LBB8_12 -; X64-RETPOLINE-NEXT: .LBB8_8: +; 
X64-RETPOLINE-NEXT: jmp .LBB7_12 +; X64-RETPOLINE-NEXT: .LBB7_8: ; X64-RETPOLINE-NEXT: cmoveq %r10, %r9 ; X64-RETPOLINE-NEXT: movl (%rsi), %eax ; X64-RETPOLINE-NEXT: orl %r9d, %eax -; X64-RETPOLINE-NEXT: jmp .LBB8_9 -; X64-RETPOLINE-NEXT: .LBB8_2: +; X64-RETPOLINE-NEXT: jmp .LBB7_9 +; X64-RETPOLINE-NEXT: .LBB7_2: ; X64-RETPOLINE-NEXT: cmovneq %r10, %r9 -; X64-RETPOLINE-NEXT: .LBB8_9: # %bb1 +; X64-RETPOLINE-NEXT: .LBB7_9: # %bb1 ; X64-RETPOLINE-NEXT: addl (%rdx), %eax ; X64-RETPOLINE-NEXT: orl %r9d, %eax -; X64-RETPOLINE-NEXT: .LBB8_10: # %bb2 +; X64-RETPOLINE-NEXT: .LBB7_10: # %bb2 ; X64-RETPOLINE-NEXT: addl (%rcx), %eax ; X64-RETPOLINE-NEXT: orl %r9d, %eax -; X64-RETPOLINE-NEXT: jmp .LBB8_11 -; X64-RETPOLINE-NEXT: .LBB8_6: +; X64-RETPOLINE-NEXT: jmp .LBB7_11 +; X64-RETPOLINE-NEXT: .LBB7_6: ; X64-RETPOLINE-NEXT: cmovneq %r10, %r9 -; X64-RETPOLINE-NEXT: .LBB8_11: # %bb3 +; X64-RETPOLINE-NEXT: .LBB7_11: # %bb3 ; X64-RETPOLINE-NEXT: addl (%r8), %eax ; X64-RETPOLINE-NEXT: orl %r9d, %eax -; X64-RETPOLINE-NEXT: .LBB8_12: # %bb4 +; X64-RETPOLINE-NEXT: .LBB7_12: # %bb4 ; X64-RETPOLINE-NEXT: shlq $47, %r9 ; X64-RETPOLINE-NEXT: orq %r9, %rsp ; X64-RETPOLINE-NEXT: retq Index: llvm/test/CodeGen/X86/statepoint-invoke.ll =================================================================== --- llvm/test/CodeGen/X86/statepoint-invoke.ll +++ llvm/test/CodeGen/X86/statepoint-invoke.ll @@ -15,10 +15,10 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 32 ; CHECK-NEXT: movq %rdi, {{[0-9]+}}(%rsp) ; CHECK-NEXT: movq %rsi, {{[0-9]+}}(%rsp) -; CHECK-NEXT: .Ltmp0: +; CHECK-NEXT: [[LABEL0:.Ltmp[0-9]+]]: ; CHECK-NEXT: callq some_call -; CHECK-NEXT: .Ltmp3: -; CHECK-NEXT: .Ltmp1: +; CHECK-NEXT: .Ltmp{{[0-9]+}}: +; CHECK-NEXT: [[LABEL1:.Ltmp[0-9]+]]: ; CHECK-NEXT: # %bb.1: # %invoke_safepoint_normal_dest ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax ; CHECK-NEXT: addq $24, %rsp @@ -26,7 +26,7 @@ ; CHECK-NEXT: retq ; CHECK-NEXT: .LBB0_2: # %exceptional_return ; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: 
.Ltmp2: +; CHECK-NEXT: [[LABEL2:.Ltmp[0-9]+]]: ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax ; CHECK-NEXT: addq $24, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 @@ -53,8 +53,8 @@ ret i64 addrspace(1)* %obj1.relocated1 } ; CHECK-LABEL: GCC_except_table{{[0-9]+}}: -; CHECK: .uleb128 .Ltmp{{[0-9]+}}-.Ltmp{{[0-9]+}} -; CHECK: .uleb128 .Ltmp{{[0-9]+}}-.Lfunc_begin{{[0-9]+}} +; CHECK: .uleb128 [[LABEL1]]-[[LABEL0]] +; CHECK: .uleb128 [[LABEL2]]-.Lfunc_begin{{[0-9]+}} ; CHECK: .byte 0 ; CHECK: .p2align 4 @@ -64,17 +64,17 @@ ; CHECK-NEXT: pushq %rax ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: movq %rdi, (%rsp) -; CHECK-NEXT: .Ltmp4: +; CHECK-NEXT: [[LABEL3:.Ltmp[0-9]+]]: ; CHECK-NEXT: callq some_other_call -; CHECK-NEXT: .Ltmp7: -; CHECK-NEXT: .Ltmp5: +; CHECK-NEXT: .Ltmp{{[0-9]+}}: +; CHECK-NEXT: [[LABEL4:.Ltmp[0-9]+]]: ; CHECK-NEXT: # %bb.1: # %normal_return ; CHECK-NEXT: popq %rcx ; CHECK-NEXT: .cfi_def_cfa_offset 8 ; CHECK-NEXT: retq ; CHECK-NEXT: .LBB1_2: # %exceptional_return ; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: .Ltmp6: +; CHECK-NEXT: [[LABEL5:.Ltmp[0-9]+]]: ; CHECK-NEXT: movq (%rsp), %rax ; CHECK-NEXT: popq %rcx ; CHECK-NEXT: .cfi_def_cfa_offset 8 @@ -96,8 +96,8 @@ ret i64 addrspace(1)* %obj.relocated } ; CHECK-LABEL: GCC_except_table{{[0-9]+}}: -; CHECK: .uleb128 .Ltmp{{[0-9]+}}-.Ltmp{{[0-9]+}} -; CHECK: .uleb128 .Ltmp{{[0-9]+}}-.Lfunc_begin{{[0-9]+}} +; CHECK: .uleb128 [[LABEL4]]-[[LABEL3]] +; CHECK: .uleb128 [[LABEL5]]-.Lfunc_begin{{[0-9]+}} ; CHECK: .byte 0 ; CHECK: .p2align 4 @@ -115,11 +115,11 @@ ; CHECK-NEXT: # %bb.1: # %left ; CHECK-NEXT: movq %rsi, (%rsp) ; CHECK-NEXT: movq %rdx, {{[0-9]+}}(%rsp) -; CHECK-NEXT: .Ltmp11: +; CHECK-NEXT: [[LABEL9:.Ltmp[0-9]+]]: ; CHECK-NEXT: movq %rsi, %rdi ; CHECK-NEXT: callq some_call -; CHECK-NEXT: .Ltmp14: -; CHECK-NEXT: .Ltmp12: +; CHECK-NEXT: .Ltmp{{[0-9]+}}: +; CHECK-NEXT: [[LABEL10:.Ltmp[0-9]+]]: ; CHECK-NEXT: # %bb.2: # %left.relocs ; CHECK-NEXT: movq (%rsp), %rax ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), 
%rcx @@ -127,11 +127,11 @@ ; CHECK-NEXT: .LBB2_3: # %right ; CHECK-NEXT: movq %rdx, (%rsp) ; CHECK-NEXT: movq %rcx, {{[0-9]+}}(%rsp) -; CHECK-NEXT: .Ltmp8: +; CHECK-NEXT: [[LABEL6:.Ltmp[0-9]+]]: ; CHECK-NEXT: movq %rsi, %rdi ; CHECK-NEXT: callq some_call -; CHECK-NEXT: .Ltmp15: -; CHECK-NEXT: .Ltmp9: +; CHECK-NEXT: .Ltmp{{[0-9]+}}: +; CHECK-NEXT: [[LABEL7:.Ltmp[0-9]+]]: ; CHECK-NEXT: # %bb.4: # %right.relocs ; CHECK-NEXT: movq (%rsp), %rcx ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax @@ -146,11 +146,11 @@ ; CHECK-NEXT: retq ; CHECK-NEXT: .LBB2_9: # %exceptional_return.right ; CHECK-NEXT: .cfi_def_cfa_offset 32 -; CHECK-NEXT: .Ltmp10: +; CHECK-NEXT: [[LABEL8:.Ltmp[0-9]+]]: ; CHECK-NEXT: movq (%rsp), %rax ; CHECK-NEXT: jmp .LBB2_6 ; CHECK-NEXT: .LBB2_7: # %exceptional_return.left -; CHECK-NEXT: .Ltmp13: +; CHECK-NEXT: [[LABEL11:.Ltmp[0-9]+]]: ; CHECK-NEXT: movq (%rsp), %rax ; CHECK-NEXT: jmp .LBB2_6 gc "statepoint-example" personality i32 ()* @"personality_function" { @@ -199,10 +199,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushq %rax ; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: .Ltmp16: +; CHECK-NEXT: [[LABEL12:.Ltmp[0-9]+]]: ; CHECK-NEXT: callq some_call -; CHECK-NEXT: .Ltmp19: -; CHECK-NEXT: .Ltmp17: +; CHECK-NEXT: .Ltmp{{[0-9]+}}: +; CHECK-NEXT: [[LABEL13:.Ltmp[0-9]+]]: ; CHECK-NEXT: .LBB3_1: # %normal_return ; CHECK-NEXT: xorl %eax, %eax ; CHECK-NEXT: popq %rcx @@ -210,7 +210,7 @@ ; CHECK-NEXT: retq ; CHECK-NEXT: .LBB3_2: # %exceptional_return ; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: .Ltmp18: +; CHECK-NEXT: [[LABEL14:.Ltmp[0-9]+]]: ; CHECK-NEXT: jmp .LBB3_1 gc "statepoint-example" personality i32 ()* @"personality_function" { entry: @@ -235,10 +235,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushq %rax ; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: .Ltmp20: +; CHECK-NEXT: [[LABEL15:.Ltmp[0-9]+]]: ; CHECK-NEXT: callq some_call -; CHECK-NEXT: .Ltmp23: -; CHECK-NEXT: .Ltmp21: +; CHECK-NEXT: .Ltmp{{[0-9]+}}: +; CHECK-NEXT: 
[[LABEL16:.Ltmp[0-9]+]]: ; CHECK-NEXT: # %bb.1: # %normal_return ; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rax ; CHECK-NEXT: popq %rcx @@ -246,7 +246,7 @@ ; CHECK-NEXT: retq ; CHECK-NEXT: .LBB4_2: # %exceptional_return ; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: .Ltmp22: +; CHECK-NEXT: [[LABEL17:.Ltmp[0-9]+]]: ; CHECK-NEXT: movl $15, %eax ; CHECK-NEXT: popq %rcx ; CHECK-NEXT: .cfi_def_cfa_offset 8