Index: llvm/include/llvm/MC/MCAsmInfo.h =================================================================== --- llvm/include/llvm/MC/MCAsmInfo.h +++ llvm/include/llvm/MC/MCAsmInfo.h @@ -186,6 +186,9 @@ /// alignment is supported. bool UseDotAlignForAlignment = false; + /// True if target supports .uleb128 directive. + bool HasULEB128Directive = true; + //===--- Data Emission Directives -------------------------------------===// /// This should be set to the directive used to get some number of zero (and @@ -575,6 +578,8 @@ return UseDotAlignForAlignment; } + bool hasULEB128Directive() const { return HasULEB128Directive; } + const char *getZeroDirective() const { return ZeroDirective; } bool doesZeroDirectiveSupportNonZeroValue() const { return ZeroDirectiveSupportsNonZeroValue; Index: llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp =================================================================== --- llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp +++ llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp @@ -413,9 +413,11 @@ bool IsSJLJ = Asm->MAI->getExceptionHandlingType() == ExceptionHandling::SjLj; bool IsWasm = Asm->MAI->getExceptionHandlingType() == ExceptionHandling::Wasm; + bool HasULEB128Directive = Asm->MAI->hasULEB128Directive(); unsigned CallSiteEncoding = - IsSJLJ ? static_cast<unsigned>(dwarf::DW_EH_PE_udata4) : - Asm->getObjFileLowering().getCallSiteEncoding(); + IsSJLJ || !HasULEB128Directive + ? static_cast<unsigned>(dwarf::DW_EH_PE_udata4) + : Asm->getObjFileLowering().getCallSiteEncoding(); bool HaveTTData = !TypeInfos.empty() || !FilterIds.empty(); // Type infos. @@ -505,6 +507,79 @@ Asm->OutStreamer->emitLabel(CstBeginLabel); }; + // An alternative path of EmitTypeTableRefAndCallSiteTableEndRef. + // For some platforms, the system assembler does not accept the form of + // `.uleb128 label2 - label1`. In those situations, we would need to calculate + // the size between label1 and label2 manually. 
+ // In this case, we would need to calculate the LSDA size, and the call + // site table size. + auto EmitTypeTableOffsetAndCallSiteTableOffset = [&]() { + assert(CallSiteEncoding == dwarf::DW_EH_PE_udata4 && !HasULEB128Directive && + "Targets that support .uleb128 do not need to take this path."); + if (CallSiteRanges.size() > 1) + report_fatal_error("-fbasic-block-sections is not yet supported on " + "platforms that do not have .uleb128 directive."); + + unsigned CallSiteTableSize = 0; + const CallSiteRange &CSRange = CallSiteRanges.back(); + for (size_t CallSiteIdx = CSRange.CallSiteBeginIdx; + CallSiteIdx != CSRange.CallSiteEndIdx; ++CallSiteIdx) { + const CallSiteEntry &S = CallSites[CallSiteIdx]; + // Each call site entry consists of 3 udata4 fields (12 bytes) and + // 1 uleb128 field. + CallSiteTableSize += 12 + getULEB128Size(S.Action); + } + + Asm->emitEncodingByte(TTypeEncoding, "@TType"); + if (HaveTTData) { + const unsigned ByteSizeOfCallSiteOffset = + getULEB128Size(CallSiteTableSize); + unsigned ActionTableSize = 0; + for (SmallVectorImpl<ActionEntry>::const_iterator I = Actions.begin(), + E = Actions.end(); + I != E; ++I) { + const ActionEntry &Action = *I; + // Each action entry consists of two sleb128 fields. + ActionTableSize += getSLEB128Size(Action.ValueForTypeID) + + getSLEB128Size(Action.NextAction); + } + + const unsigned TypeInfoSize = + Asm->GetSizeOfEncodedValue(TTypeEncoding) * MF->getTypeInfos().size(); + + const unsigned LSDASizeBeforeAlign = + 1 // Call site encoding byte. + + ByteSizeOfCallSiteOffset // uleb128 encoding of CallSiteTableSize. + + CallSiteTableSize // Call site Table content. + + ActionTableSize; // Action table content. + + const unsigned LSDASizeWithoutAlign = LSDASizeBeforeAlign + TypeInfoSize; + const unsigned ByteSizeOfLSDAWithoutAlign = + getULEB128Size(LSDASizeWithoutAlign); + const unsigned DisplacementBeforeAlign = + 2 // LP Encoding and Type Encoding. 
+ + ByteSizeOfLSDAWithoutAlign + LSDASizeBeforeAlign; + + // The type info area starts with 4 byte alignment. + const unsigned NeedAlignVal = (4 - DisplacementBeforeAlign % 4) % 4; + unsigned LSDASizeWithAlign = LSDASizeWithoutAlign + NeedAlignVal; + const unsigned ByteSizeOfLSDAWithAlign = + getULEB128Size(LSDASizeWithAlign); + + // The LSDASizeWithAlign could use 1 byte less padding for alignment + // when the data we use to represent the LSDA Size needs to be 1 byte + // larger than we previously calculated without alignment. + if (ByteSizeOfLSDAWithAlign > ByteSizeOfLSDAWithoutAlign) + LSDASizeWithAlign = LSDASizeWithAlign - 1; + + Asm->OutStreamer->emitULEB128IntValue(LSDASizeWithAlign, + ByteSizeOfLSDAWithAlign); + } + + Asm->emitEncodingByte(CallSiteEncoding, "Call site"); + Asm->OutStreamer->emitULEB128IntValue(CallSiteTableSize); + }; + // SjLj / Wasm Exception handling if (IsSJLJ || IsWasm) { Asm->OutStreamer->emitLabel(Asm->getMBBExceptionSym(Asm->MF->front())); @@ -620,7 +695,10 @@ Asm->MAI->getCodePointerSize()); } - EmitTypeTableRefAndCallSiteTableEndRef(); + if (HasULEB128Directive) + EmitTypeTableRefAndCallSiteTableEndRef(); + else + EmitTypeTableOffsetAndCallSiteTableOffset(); for (size_t CallSiteIdx = CSRange.CallSiteBeginIdx; CallSiteIdx != CSRange.CallSiteEndIdx; ++CallSiteIdx) { Index: llvm/lib/MC/MCAsmInfo.cpp =================================================================== --- llvm/lib/MC/MCAsmInfo.cpp +++ llvm/lib/MC/MCAsmInfo.cpp @@ -21,6 +21,7 @@ using namespace llvm; enum DefaultOnOff { Default, Enable, Disable }; + static cl::opt<DefaultOnOff> DwarfExtendedLoc( "dwarf-extended-loc", cl::Hidden, cl::desc("Disable emission of the extended flags in .loc directives."), @@ -28,6 +29,14 @@ clEnumVal(Enable, "Enabled"), clEnumVal(Disable, "Disabled")), cl::init(Default)); +static cl::opt<DefaultOnOff> UseULEB128Directive( + "use-uleb128-directive", cl::Hidden, + cl::desc( + "Disable the usage of uleb128 directive, and generate .byte instead."), + 
cl::values(clEnumVal(Default, "Default for platform"), + clEnumVal(Enable, "Enabled"), clEnumVal(Disable, "Disabled")), + cl::init(Default)); + MCAsmInfo::MCAsmInfo() { SeparatorString = ";"; CommentString = "#"; @@ -51,6 +60,8 @@ WeakDirective = "\t.weak\t"; if (DwarfExtendedLoc != Default) SupportsExtendedDwarfLocDirective = DwarfExtendedLoc == Enable; + if (UseULEB128Directive != Default) + HasULEB128Directive = UseULEB128Directive == Enable; // FIXME: Clang's logic should be synced with the logic used to initialize // this member and the two implementations should be merged. Index: llvm/lib/MC/MCAsmInfoXCOFF.cpp =================================================================== --- llvm/lib/MC/MCAsmInfoXCOFF.cpp +++ llvm/lib/MC/MCAsmInfoXCOFF.cpp @@ -20,6 +20,7 @@ PrivateLabelPrefix = "L.."; SupportsQuotedNames = false; UseDotAlignForAlignment = true; + HasULEB128Directive = false; ZeroDirective = "\t.space\t"; ZeroDirectiveSupportsNonZeroValue = false; AsciiDirective = nullptr; // not supported Index: llvm/test/CodeGen/PowerPC/aix-exception.ll =================================================================== --- llvm/test/CodeGen/PowerPC/aix-exception.ll +++ llvm/test/CodeGen/PowerPC/aix-exception.ll @@ -78,27 +78,27 @@ resume { i8*, i32 } %lpad.val3 } -; ASM: ._Z9catchFuncv: -; ASM: L..func_begin0: -; ASM: # %bb.0: # %entry -; ASM: mflr 0 -; ASM: L..tmp0: -; ASM: bl ._Z9throwFuncv -; ASM: nop -; ASM: L..tmp1: -; ASM: # %bb.1: # %invoke.cont -; ASM: li 3, 1 -; ASM: L..BB1_2: # %return -; ASM: mtlr 0 -; ASM: blr -; ASM: L..BB1_3: # %lpad -; ASM: L..tmp2: -; ASM: bl .__cxa_begin_catch[PR] -; ASM: nop -; ASM: bl .__cxa_end_catch[PR] -; ASM: nop -; ASM: b L..BB1_2 -; ASM: L..func_end0: +; ASM: ._Z9catchFuncv: +; ASM: L..func_begin0: +; ASM: # %bb.0: # %entry +; ASM: mflr 0 +; ASM: L..tmp0: +; ASM: bl ._Z9throwFuncv +; ASM: nop +; ASM: L..tmp1: +; ASM: # %bb.1: # %invoke.cont +; ASM: li 3, 1 +; ASM: L..BB1_2: # %return +; ASM: mtlr 0 +; ASM: blr +; ASM: 
L..BB1_3: # %lpad +; ASM: L..tmp2: +; ASM: bl .__cxa_begin_catch[PR] +; ASM: nop +; ASM: bl .__cxa_end_catch[PR] +; ASM: nop +; ASM: b L..BB1_2 +; ASM: L..func_end0: ; ASM: .csect .gcc_except_table[RO],2 ; ASM: .align 2 @@ -106,12 +106,11 @@ ; ASM: L..exception0: ; ASM: .byte 255 # @LPStart Encoding = omit ; ASM32: .byte 187 # @TType Encoding = -; ASM64: .byte 188 # @TType Encoding = -; ASM: .uleb128 L..ttbase0-L..ttbaseref0 -; ASM: L..ttbaseref0: +; ASM64: .byte 188 # @TType Encoding = +; ASM32: .byte 37 +; ASM64: .byte 41 ; ASM: .byte 3 # Call site Encoding = udata4 -; ASM: .uleb128 L..cst_end0-L..cst_begin0 -; ASM: L..cst_begin0: +; ASM: .byte 26 ; ASM: .vbyte 4, L..tmp0-L..func_begin0 # >> Call Site 1 << ; ASM: .vbyte 4, L..tmp1-L..tmp0 # Call between L..tmp0 and L..tmp1 ; ASM: .vbyte 4, L..tmp2-L..func_begin0 # jumps to L..tmp2 @@ -140,9 +139,11 @@ ; ASM64: .vbyte 8, GCC_except_table1 ; ASM64: .vbyte 8, __xlcxx_personality_v1[DS] -; ASM: .toc -; ASM: L..C0: -; ASM: .tc _ZTIi[TC],_ZTIi[UA] + + +; ASM: .toc +; ASM: L..C0: +; ASM: .tc _ZTIi[TC],_ZTIi[UA] declare i8* @__cxa_allocate_exception(i32) declare void @__cxa_throw(i8*, i8*, i8*) Index: llvm/test/CodeGen/X86/gnu-eh-alternative.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/X86/gnu-eh-alternative.ll @@ -0,0 +1,104 @@ +; RUN: llc -verify-machineinstrs -mtriple x86_64-pc-linux-gnu -filetype=asm < %s | \ +; RUN: FileCheck --check-prefixes=ASM,ULEB128 %s +; RUN: llc -verify-machineinstrs -mtriple x86_64-pc-linux-gnu -use-uleb128-directive=Enable -filetype=asm < %s | \ +; RUN: FileCheck --check-prefixes=ASM,ULEB128 %s +; RUN: llc -verify-machineinstrs -mtriple x86_64-pc-linux-gnu -use-uleb128-directive=Disable -filetype=asm < %s | \ +; RUN: FileCheck --check-prefixes=ASM,NO128 %s + +@_ZTIi = external dso_local constant i8* + +define dso_local i32 @main() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +entry: + %retval = alloca i32, 
align 4 + %exn.slot = alloca i8*, align 8 + %ehselector.slot = alloca i32, align 4 + store i32 0, i32* %retval, align 4 + %exception = call i8* @__cxa_allocate_exception(i64 4) #1 + %0 = bitcast i8* %exception to i32* + store i32 1, i32* %0, align 16 + invoke void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) #2 + to label %unreachable unwind label %lpad + +lpad: ; preds = %entry + %1 = landingpad { i8*, i32 } + catch i8* null + %2 = extractvalue { i8*, i32 } %1, 0 + store i8* %2, i8** %exn.slot, align 8 + %3 = extractvalue { i8*, i32 } %1, 1 + store i32 %3, i32* %ehselector.slot, align 4 + br label %catch + +catch: ; preds = %lpad + %exn = load i8*, i8** %exn.slot, align 8 + %4 = call i8* @__cxa_begin_catch(i8* %exn) #1 + store i32 2, i32* %retval, align 4 + call void @__cxa_end_catch() + br label %return + +try.cont: ; No predecessors! + store i32 1, i32* %retval, align 4 + br label %return + +return: ; preds = %try.cont, %catch + %5 = load i32, i32* %retval, align 4 + ret i32 %5 + +unreachable: ; preds = %entry + unreachable +} + +; ASM: GCC_except_table0: +; ASM: .Lexception0: +; ASM: .byte 255 # @LPStart Encoding = omit +; ASM: .byte 3 # @TType Encoding = udata4 + +; NO128: .byte 49 +; NO128: .byte 3 # Call site Encoding = udata4 +; NO128: .byte 39 +; NO128: .long .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 << +; NO128: .long .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0 +; NO128: .long 0 # has no landing pad +; NO128: .byte 0 # On action: cleanup +; NO128: .long .Ltmp0-.Lfunc_begin0 # >> Call Site 2 << +; NO128: .long .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1 +; NO128: .long .Ltmp2-.Lfunc_begin0 # jumps to .Ltmp2 +; NO128: .byte 1 # On action: 1 +; NO128: .long .Ltmp1-.Lfunc_begin0 # >> Call Site 3 << +; NO128: .long .Lfunc_end0-.Ltmp1 # Call between .Ltmp1 and .Lfunc_end0 +; NO128: .long 0 # has no landing pad +; NO128: .byte 0 # On action: cleanup + +; ULEB128: .uleb128 .Lttbase0-.Lttbaseref0 +; ULEB128: 
.Lttbaseref0: +; ULEB128: .byte 1 # Call site Encoding = uleb128 +; ULEB128: .uleb128 .Lcst_end0-.Lcst_begin0 +; ULEB128: .Lcst_begin0: +; ULEB128: .uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 << +; ULEB128: .uleb128 .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0 +; ULEB128: .byte 0 # has no landing pad +; ULEB128: .byte 0 # On action: cleanup +; ULEB128: .uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 << +; ULEB128: .uleb128 .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1 +; ULEB128: .uleb128 .Ltmp2-.Lfunc_begin0 # jumps to .Ltmp2 +; ULEB128: .byte 1 # On action: 1 +; ULEB128: .uleb128 .Ltmp1-.Lfunc_begin0 # >> Call Site 3 << +; ULEB128: .uleb128 .Lfunc_end0-.Ltmp1 # Call between .Ltmp1 and .Lfunc_end0 +; ULEB128: .byte 0 # has no landing pad +; ULEB128: .byte 0 # On action: cleanup + +; ASM: .Lcst_end0: +; ASM: .byte 1 # >> Action Record 1 << +; ASM: # Catch TypeInfo 1 +; ASM: .byte 0 # No further actions +; ASM: .p2align 2 +; ASM: # >> Catch TypeInfos << +; ASM: .long 0 # TypeInfo 1 +; ASM: .Lttbase0: +; ASM: .p2align 2 + +declare dso_local i8* @__cxa_allocate_exception(i64) +declare dso_local void @__cxa_throw(i8*, i8*, i8*) +declare dso_local i32 @__gxx_personality_v0(...) +declare dso_local i8* @__cxa_begin_catch(i8*) +declare dso_local void @__cxa_end_catch() +