diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.h
@@ -66,6 +66,25 @@
   DecodeStatus tryDecodeInst(const uint8_t* Table, MCInst &MI, uint64_t Inst,
                              uint64_t Address) const;
 
+  Optional<DecodeStatus> onSymbolStart(SymbolInfoTy &Symbol, uint64_t &Size,
+                                       ArrayRef<uint8_t> Bytes,
+                                       uint64_t Address,
+                                       raw_ostream &CStream) const override;
+
+  DecodeStatus decodeKernelDescriptor(StringRef KdName, ArrayRef<uint8_t> Bytes,
+                                      uint64_t &Size, uint64_t KdAddress) const;
+
+  DecodeStatus
+  decodeKernelDescriptorDirective(size_t &CurrentIndex, ArrayRef<uint8_t> Bytes,
+                                  uint64_t &Size,
+                                  raw_string_ostream &KdStream) const;
+
+  DecodeStatus decodeCOMPUTE_PGM_RSRC1(uint32_t FourByteBuffer,
+                                       raw_string_ostream &KdStream) const;
+
+  DecodeStatus decodeCOMPUTE_PGM_RSRC2(uint32_t FourByteBuffer,
+                                       raw_string_ostream &KdStream) const;
+
   DecodeStatus convertSDWAInst(MCInst &MI) const;
   DecodeStatus convertDPP8Inst(MCInst &MI) const;
   DecodeStatus convertMIMGInst(MCInst &MI) const;
diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
--- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
+++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp
@@ -34,6 +34,8 @@
 #include "llvm/MC/MCFixedLenDisassembler.h"
 #include "llvm/MC/MCInst.h"
 #include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/AMDHSAKernelDescriptor.h"
+#include "llvm/Support/DataExtractor.h"
 #include "llvm/Support/Endian.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
@@ -1215,6 +1217,470 @@
   return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
 }
 
+//===----------------------------------------------------------------------===//
+// AMDGPU specific symbol handling
+//===----------------------------------------------------------------------===//
+MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC1(
+    uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
+  // Decode as directives that handle COMPUTE_PGM_RSRC1.
+  StringRef Indent = "\t";
+
+  // We cannot accurately backward compute #VGPRs used from
+  // GRANULATED_WORKITEM_VGPR_COUNT, so we 'decode' it as the Code Object V3
+  // predefined symbol.
+  KdStream << Indent << ".amdhsa_next_free_vgpr "
+           << ".amdgcn.next_free_vgpr"
+           << "\n";
+
+  // We cannot backward compute the values used to calculate
+  // GRANULATED_WAVEFRONT_SGPR_COUNT. Hence the original values for the
+  // following directives can't be computed:
+  //   .amdhsa_reserve_vcc
+  //   .amdhsa_reserve_flat_scratch
+  //   .amdhsa_reserve_xnack_mask
+  // They take their respective default values if not specified in assembly.
+  //
+  // GRANULATED_WAVEFRONT_SGPR_COUNT
+  //     = NEXT_FREE_SGPR + VCC + FLAT_SCRATCH + XNACK_MASK
+  //
+  // To get the exact same bytes in the re-assembled binary, we disassemble
+  // .amdhsa_next_free_sgpr as the amdgcn.next_free_sgpr assembler symbol and
+  // set the remaining directives to "0".
+  //
+  // So now we see:
+  //
+  // GRANULATED_WAVEFRONT_SGPR_COUNT
+  //     = NEXT_FREE_SGPR + 0 + 0 + 0
+  //
+  // The disassembler cannot recover the original values of those directives.
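To make the comment above concrete, here is a small standalone sketch (not part of the patch) of the forward computation an assembler performs for this field. The granule value and the helper name are assumptions made only for the example; the point is that several directive combinations collapse onto one encoded value, so the decoder cannot recover the original directives and instead emits amdgcn.next_free_sgpr and zeroed reserve directives.

```cpp
// Illustrative only: why GRANULATED_WAVEFRONT_SGPR_COUNT is not invertible.
// Names and the granule constant are assumptions for this example, not LLVM APIs.
#include <algorithm>
#include <cstdint>
#include <iostream>

// Assumed encoding granule of 8 SGPRs (the real value depends on the subtarget).
constexpr uint32_t SgprGranule = 8;

uint32_t granulatedSgprCount(uint32_t NextFreeSgpr, bool ReserveVcc,
                             bool ReserveFlatScratch, bool ReserveXnackMask) {
  // VCC, FLAT_SCRATCH and XNACK_MASK each occupy an SGPR pair when reserved.
  uint32_t Total = NextFreeSgpr + 2 * ReserveVcc + 2 * ReserveFlatScratch +
                   2 * ReserveXnackMask;
  Total = std::max<uint32_t>(Total, 1);
  // Round up to the granule and encode as "granules - 1".
  return (Total + SgprGranule - 1) / SgprGranule - 1;
}

int main() {
  // Two different directive combinations, one encoded value: the mapping
  // loses information, which is why the disassembler prints the predefined
  // symbol and leaves the reserve directives at 0.
  std::cout << granulatedSgprCount(10, true, true, false) << '\n';   // 1
  std::cout << granulatedSgprCount(14, false, false, false) << '\n'; // 1
}
```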
+  KdStream << Indent << ".amdhsa_reserve_vcc " << 0 << "\n";
+  KdStream << Indent << ".amdhsa_reserve_flat_scratch " << 0 << "\n";
+  KdStream << Indent << ".amdhsa_reserve_xnack_mask " << 0 << "\n";
+
+  KdStream << Indent << ".amdhsa_next_free_sgpr "
+           << ".amdgcn.next_free_sgpr\n";
+
+  // Bits 11:10 (PRIORITY) must be 0.
+  if ((FourByteBuffer & amdhsa::COMPUTE_PGM_RSRC1_PRIORITY) >>
+      amdhsa::COMPUTE_PGM_RSRC1_PRIORITY_SHIFT) {
+    return MCDisassembler::Fail;
+  }
+  KdStream << Indent << ".amdhsa_float_round_mode_32 "
+           << ((FourByteBuffer &
+                amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32) >>
+               amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32_SHIFT)
+           << "\n";
+  KdStream << Indent << ".amdhsa_float_round_mode_16_64 "
+           << ((FourByteBuffer &
+                amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64) >>
+               amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64_SHIFT)
+           << "\n";
+  KdStream << Indent << ".amdhsa_float_denorm_mode_32 "
+           << ((FourByteBuffer &
+                amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32) >>
+               amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32_SHIFT)
+           << "\n";
+  KdStream << Indent << ".amdhsa_float_denorm_mode_16_64 "
+           << ((FourByteBuffer &
+                amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64) >>
+               amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64_SHIFT)
+           << "\n";
+
+  // Bit 20 (PRIV) must be 0.
+  if ((FourByteBuffer & amdhsa::COMPUTE_PGM_RSRC1_PRIV) >>
+      amdhsa::COMPUTE_PGM_RSRC1_PRIV_SHIFT) {
+    return MCDisassembler::Fail;
+  }
+
+  KdStream << Indent << ".amdhsa_dx10_clamp "
+           << ((FourByteBuffer & amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP) >>
+               amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP_SHIFT)
+           << "\n";
+
+  // Bit 22 (DEBUG_MODE) must be 0.
+  if ((FourByteBuffer & amdhsa::COMPUTE_PGM_RSRC1_DEBUG_MODE) >>
+      amdhsa::COMPUTE_PGM_RSRC1_DEBUG_MODE_SHIFT) {
+    return MCDisassembler::Fail;
+  }
+  KdStream << Indent << ".amdhsa_ieee_mode "
+           << ((FourByteBuffer & amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE) >>
+               amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE_SHIFT)
+           << "\n";
+
+  // Bit 24 (BULKY) must be 0.
+  if ((FourByteBuffer & amdhsa::COMPUTE_PGM_RSRC1_BULKY) >>
+      amdhsa::COMPUTE_PGM_RSRC1_BULKY_SHIFT) {
+    return MCDisassembler::Fail;
+  }
+  // Bit 25 (CDBG_USER) must be 0.
+  if ((FourByteBuffer & amdhsa::COMPUTE_PGM_RSRC1_CDBG_USER) >>
+      amdhsa::COMPUTE_PGM_RSRC1_CDBG_USER_SHIFT) {
+    return MCDisassembler::Fail;
+  }
+
+  KdStream << Indent << ".amdhsa_fp16_overflow "
+           << ((FourByteBuffer & amdhsa::COMPUTE_PGM_RSRC1_FP16_OVFL) >>
+               amdhsa::COMPUTE_PGM_RSRC1_FP16_OVFL_SHIFT)
+           << "\n";
+
+  // The next two bits are reserved and must be 0.
+  if ((FourByteBuffer & amdhsa::COMPUTE_PGM_RSRC1_RESERVED0) >>
+      amdhsa::COMPUTE_PGM_RSRC1_RESERVED0_SHIFT) {
+    return MCDisassembler::Fail;
+  }
+
+  KdStream << Indent << ".amdhsa_workgroup_processor_mode "
+           << ((FourByteBuffer & amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE) >>
+               amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE_SHIFT)
+           << "\n";
+  KdStream << Indent << ".amdhsa_memory_ordered "
+           << ((FourByteBuffer & amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED) >>
+               amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED_SHIFT)
+           << "\n";
+  KdStream << Indent << ".amdhsa_forward_progress "
+           << ((FourByteBuffer & amdhsa::COMPUTE_PGM_RSRC1_FWD_PROGRESS) >>
+               amdhsa::COMPUTE_PGM_RSRC1_FWD_PROGRESS_SHIFT)
+           << "\n";
+
+  return MCDisassembler::Success;
+} // decodeCOMPUTE_PGM_RSRC1()
+
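Both COMPUTE_PGM_RSRC decoders repeat the same "(FourByteBuffer & MASK) >> MASK_SHIFT" idiom inline for every field. A minimal standalone illustration of that extraction pattern follows; the mask and shift constants here are stand-ins written out for the example rather than the real amdhsa:: definitions (although FLOAT_ROUND_MODE_32 does occupy bits 13:12).

```cpp
// Generic single-field extraction, the pattern used inline throughout the
// RSRC1/RSRC2 decoders. Constants below are placeholders for the example.
#include <cstdint>
#include <iostream>

constexpr uint32_t FLOAT_ROUND_MODE_32_MASK = 0x3 << 12;
constexpr uint32_t FLOAT_ROUND_MODE_32_SHIFT = 12;

uint32_t extractField(uint32_t Value, uint32_t Mask, uint32_t Shift) {
  return (Value & Mask) >> Shift;
}

int main() {
  uint32_t Rsrc1 = 0x2000; // bit 13 set => round mode field == 2
  std::cout << ".amdhsa_float_round_mode_32 "
            << extractField(Rsrc1, FLOAT_ROUND_MODE_32_MASK,
                            FLOAT_ROUND_MODE_32_SHIFT)
            << '\n';
}
```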
+MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeCOMPUTE_PGM_RSRC2(
+    uint32_t FourByteBuffer, raw_string_ostream &KdStream) const {
+  // Decode as directives that handle COMPUTE_PGM_RSRC2.
+  StringRef Indent = "\t";
+
+  KdStream
+      << Indent << ".amdhsa_system_sgpr_private_segment_wavefront_offset "
+      << ((FourByteBuffer &
+           amdhsa::
+               COMPUTE_PGM_RSRC2_ENABLE_SGPR_PRIVATE_SEGMENT_WAVEFRONT_OFFSET) >>
+          amdhsa::
+              COMPUTE_PGM_RSRC2_ENABLE_SGPR_PRIVATE_SEGMENT_WAVEFRONT_OFFSET_SHIFT)
+      << "\n";
+
+  KdStream << Indent << ".amdhsa_system_sgpr_workgroup_id_x "
+           << ((FourByteBuffer &
+                amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X) >>
+               amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X_SHIFT)
+           << "\n";
+  KdStream << Indent << ".amdhsa_system_sgpr_workgroup_id_y "
+           << ((FourByteBuffer &
+                amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y) >>
+               amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y_SHIFT)
+           << "\n";
+  KdStream << Indent << ".amdhsa_system_sgpr_workgroup_id_z "
+           << ((FourByteBuffer &
+                amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z) >>
+               amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z_SHIFT)
+           << "\n";
+  KdStream << Indent << ".amdhsa_system_sgpr_workgroup_info "
+           << ((FourByteBuffer &
+                amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO) >>
+               amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO_SHIFT)
+           << "\n";
+  KdStream << Indent << ".amdhsa_system_vgpr_workitem_id "
+           << ((FourByteBuffer &
+                amdhsa::COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID) >>
+               amdhsa::COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID_SHIFT)
+           << "\n";
+
+  // Bit 13 (ENABLE_EXCEPTION_ADDRESS_WATCH) must be 0.
+  if ((FourByteBuffer &
+       amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_ADDRESS_WATCH) >>
+      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_ADDRESS_WATCH_SHIFT) {
+    return MCDisassembler::Fail;
+  }
+  // Bit 14 (ENABLE_EXCEPTION_MEMORY) must be 0.
+  if ((FourByteBuffer & amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_MEMORY) >>
+      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_MEMORY_SHIFT) {
+    return MCDisassembler::Fail;
+  }
+  // Bits 23:15 (GRANULATED_LDS_SIZE) must be 0.
+  if ((FourByteBuffer & amdhsa::COMPUTE_PGM_RSRC2_GRANULATED_LDS_SIZE) >>
+      amdhsa::COMPUTE_PGM_RSRC2_GRANULATED_LDS_SIZE_SHIFT) {
+    return MCDisassembler::Fail;
+  }
+
+  KdStream
+      << Indent << ".amdhsa_exception_fp_ieee_invalid_op "
+      << ((FourByteBuffer &
+           amdhsa::
+               COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION) >>
+          amdhsa::
+              COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION_SHIFT)
+      << "\n";
+  KdStream
+      << Indent << ".amdhsa_exception_fp_denorm_src "
+      << ((FourByteBuffer &
+           amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE) >>
+          amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE_SHIFT)
+      << "\n";
+  KdStream
+      << Indent << ".amdhsa_exception_fp_ieee_div_zero "
+      << ((FourByteBuffer &
+           amdhsa::
+               COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO) >>
+          amdhsa::
+              COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO_SHIFT)
+      << "\n";
+  KdStream
+      << Indent << ".amdhsa_exception_fp_ieee_overflow "
+      << ((FourByteBuffer &
+           amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW) >>
+          amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW_SHIFT)
+      << "\n";
+  KdStream
+      << Indent << ".amdhsa_exception_fp_ieee_underflow "
+      << ((FourByteBuffer &
+           amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW) >>
+          amdhsa::
+              COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW_SHIFT)
+      << "\n";
+  KdStream
+      << Indent << ".amdhsa_exception_fp_ieee_inexact "
+      << ((FourByteBuffer &
+           amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT) >>
+          amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT_SHIFT)
+      << "\n";
+  KdStream
+      << Indent << ".amdhsa_exception_int_div_zero "
+      << ((FourByteBuffer &
+           amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO) >>
+          amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO_SHIFT)
+      << "\n";
+
+  // Last bit. Reserved, must be 0.
+  if ((FourByteBuffer & amdhsa::COMPUTE_PGM_RSRC2_RESERVED0) >>
+      amdhsa::COMPUTE_PGM_RSRC2_RESERVED0_SHIFT) {
+    return MCDisassembler::Fail;
+  }
+  return MCDisassembler::Success;
+} // decodeCOMPUTE_PGM_RSRC2()
+
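The next function walks the 64-byte kernel descriptor chunk by chunk, switching on byte offsets. The layout it assumes is sketched below for orientation; it mirrors what llvm::amdhsa::kernel_descriptor_t describes, but the struct here is written out purely as an editorial illustration and is not part of the patch.

```cpp
// Byte layout of the Code Object V3 kernel descriptor as consumed by the
// switch below. Offsets in comments match the case labels.
#include <cstdint>

struct KernelDescriptorSketch {
  uint32_t group_segment_fixed_size;       // offset 0
  uint32_t private_segment_fixed_size;     // offset 4
  uint8_t  reserved0[8];                   // offset 8, must be zero
  int64_t  kernel_code_entry_byte_offset;  // offset 16
  uint8_t  reserved1[20];                  // offset 24, must be zero
  uint32_t compute_pgm_rsrc3;              // offset 44 (GFX10 only)
  uint32_t compute_pgm_rsrc1;              // offset 48
  uint32_t compute_pgm_rsrc2;              // offset 52
  uint16_t kernel_code_properties;         // offset 56
  uint8_t  reserved2[6];                   // offset 58, must be zero
};

static_assert(sizeof(KernelDescriptorSketch) == 64,
              "the descriptor is exactly 64 bytes");
```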
+MCDisassembler::DecodeStatus
+AMDGPUDisassembler::decodeKernelDescriptorDirective(
+    size_t &CurrentIndex, ArrayRef<uint8_t> Bytes, uint64_t &Size,
+    raw_string_ostream &KdStream) const {
+  uint16_t TwoByteBuffer = 0;
+  uint32_t FourByteBuffer = 0;
+  uint64_t EightByteBuffer = 0;
+
+  StringRef ReservedBytes;
+  StringRef Indent = "\t";
+
+  DataExtractor DE(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);
+
+  // When we fail, we set:
+  //   Size = CurrentIndex (i.e., the starting point of the chunk of bytes)
+  //          + the length of the chunk.
+  // The failed region is then from 0 to this new value of Size. We do this
+  // because many directives in the kernel descriptor affect only one or a
+  // few bits.
+  switch (CurrentIndex) {
+  case 0:
+    FourByteBuffer = DE.getU32(&CurrentIndex);
+    KdStream << Indent << ".amdhsa_group_segment_fixed_size " << FourByteBuffer
+             << "\n";
+    return MCDisassembler::Success;
+
+  case 4: // 0 + 4
+    FourByteBuffer = DE.getU32(&CurrentIndex);
+    KdStream << Indent << ".amdhsa_private_segment_fixed_size "
+             << FourByteBuffer << "\n";
+    return MCDisassembler::Success;
+
+  case 8: // 4 + 4
+    // 8 reserved bytes, must be 0.
+    EightByteBuffer = DE.getU64(&CurrentIndex);
+    if (EightByteBuffer) {
+      Size = 8 + 8;
+      return MCDisassembler::Fail;
+    }
+    return MCDisassembler::Success;
+
+  case 16: // 8 + 8
+    // KERNEL_CODE_ENTRY_BYTE_OFFSET
+    // So far no directive controls this for Code Object V3, so simply skip it
+    // for disassembly.
+    CurrentIndex += 8;
+    return MCDisassembler::Success;
+
+  case 24: // 16 + 8
+    // 20 reserved bytes, must be 0.
+    ReservedBytes = DE.getBytes(&CurrentIndex, 20);
+    for (int I = 0; I < 20; ++I) {
+      if (ReservedBytes[I] != 0) {
+        Size = 24 + 20;
+        return MCDisassembler::Fail;
+      }
+    }
+    return MCDisassembler::Success;
+
+  case 44: // 24 + 20
+    // COMPUTE_PGM_RSRC3
+    // - Only set for GFX10; for GFX6-9 this must be 0.
+    // - Currently no directives directly control this.
+    FourByteBuffer = DE.getU32(&CurrentIndex);
+    if (!isGFX10() && FourByteBuffer) {
+      Size = 44 + 4;
+      return MCDisassembler::Fail;
+    }
+    return MCDisassembler::Success;
+
+  case 48: // 44 + 4
+    // COMPUTE_PGM_RSRC1
+    FourByteBuffer = DE.getU32(&CurrentIndex);
+    if (decodeCOMPUTE_PGM_RSRC1(FourByteBuffer, KdStream) ==
+        MCDisassembler::Fail) {
+      Size = 48 + 4;
+      return MCDisassembler::Fail;
+    }
+    return MCDisassembler::Success;
+
+  case 52: // 48 + 4
+    // COMPUTE_PGM_RSRC2
+    FourByteBuffer = DE.getU32(&CurrentIndex);
+    if (decodeCOMPUTE_PGM_RSRC2(FourByteBuffer, KdStream) ==
+        MCDisassembler::Fail) {
+      Size = 52 + 4;
+      return MCDisassembler::Fail;
+    }
+    return MCDisassembler::Success;
+
+  case 56: // 52 + 4
+    TwoByteBuffer = DE.getU16(&CurrentIndex);
+    KdStream
+        << Indent << ".amdhsa_user_sgpr_private_segment_buffer "
+        << ((TwoByteBuffer &
+             amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER) >>
+            amdhsa::
+                KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT)
+        << "\n";
+    KdStream << Indent << ".amdhsa_user_sgpr_dispatch_ptr "
+             << ((TwoByteBuffer &
+                  amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR) >>
+                 amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT)
+             << "\n";
+    KdStream << Indent << ".amdhsa_user_sgpr_queue_ptr "
+             << ((TwoByteBuffer &
+                  amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR) >>
+                 amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT)
+             << "\n";
+    KdStream
+        << Indent << ".amdhsa_user_sgpr_kernarg_segment_ptr "
+        << ((TwoByteBuffer &
+             amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR) >>
+            amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT)
+        << "\n";
+    KdStream << Indent << ".amdhsa_user_sgpr_dispatch_id "
+             << ((TwoByteBuffer &
+                  amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID) >>
+                 amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT)
+             << "\n";
+    KdStream
+        << Indent << ".amdhsa_user_sgpr_flat_scratch_init "
+        << ((TwoByteBuffer &
+             amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT) >>
+            amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT)
+        << "\n";
+    KdStream
+        << Indent << ".amdhsa_user_sgpr_private_segment_size "
+        << ((TwoByteBuffer &
+             amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE) >>
+            amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT)
+        << "\n";
+
+    // The next 3 bits are reserved, must be 0.
+    if ((TwoByteBuffer & amdhsa::KERNEL_CODE_PROPERTY_RESERVED0) >>
+        amdhsa::KERNEL_CODE_PROPERTY_RESERVED0_SHIFT) {
+      Size = 56 + 2;
+      return MCDisassembler::Fail;
+    }
+
+    KdStream << Indent << ".amdhsa_wavefront_size32 "
+             << ((TwoByteBuffer &
+                  amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32) >>
+                 amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32_SHIFT)
+             << "\n";
+
+    // The rest of the bits are reserved and must be 0.
+    if ((TwoByteBuffer & amdhsa::KERNEL_CODE_PROPERTY_RESERVED1) >>
+        amdhsa::KERNEL_CODE_PROPERTY_RESERVED1_SHIFT) {
+      Size = 56 + 2;
+      return MCDisassembler::Fail;
+    }
+    return MCDisassembler::Success;
+
+  case 58: // 56 + 2
+    // 6 bytes from here are reserved, must be 0.
+    ReservedBytes = DE.getBytes(&CurrentIndex, 6);
+    for (int I = 0; I < 6; ++I) {
+      if (ReservedBytes[I] != 0) {
+        Size = 58 + 6;
+        return MCDisassembler::Fail;
+      }
+    }
+    // 58 + 6 = 64. End of kernel descriptor.
+    return MCDisassembler::Success;
+  }
+} // decodeKernelDescriptorDirective()
+
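The directive decoder above pulls each field out of the raw bytes with DataExtractor, which also advances CurrentIndex past the bytes it consumed; the next call therefore lands on the next case label. For readers unfamiliar with it, each read is essentially a bounds-checked little-endian load. A plain-C++ equivalent of the 4-byte case, as a simplified sketch with no error handling, might look like this:

```cpp
// Simplified stand-in for DataExtractor::getU32 on a little-endian buffer:
// read four bytes at *Offset, advance *Offset, return the assembled value.
#include <cstddef>
#include <cstdint>
#include <vector>

uint32_t readU32LE(const std::vector<uint8_t> &Bytes, size_t *Offset) {
  uint32_t Value = static_cast<uint32_t>(Bytes[*Offset]) |
                   static_cast<uint32_t>(Bytes[*Offset + 1]) << 8 |
                   static_cast<uint32_t>(Bytes[*Offset + 2]) << 16 |
                   static_cast<uint32_t>(Bytes[*Offset + 3]) << 24;
  *Offset += 4;
  return Value;
}

int main() {
  std::vector<uint8_t> KD(64, 0);
  KD[0] = 0x10; // group_segment_fixed_size = 16
  size_t Index = 0;
  return readU32LE(KD, &Index) == 16 && Index == 4 ? 0 : 1;
}
```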
+MCDisassembler::DecodeStatus AMDGPUDisassembler::decodeKernelDescriptor(
+    StringRef KdName, ArrayRef<uint8_t> Bytes, uint64_t &Size,
+    uint64_t KdAddress) const {
+  // CP microcode requires the kernel descriptor to be 64-byte aligned.
+  if (Bytes.size() != 64 || KdAddress % 64 != 0)
+    return MCDisassembler::Fail;
+
+  std::string Kd;
+  raw_string_ostream KdStream(Kd);
+  KdStream << ".amdhsa_kernel " << KdName.drop_back(3).str() << "\n";
+
+  size_t CurrentIndex = 0;
+  while (CurrentIndex < Bytes.size()) {
+    MCDisassembler::DecodeStatus Status =
+        decodeKernelDescriptorDirective(CurrentIndex, Bytes, Size, KdStream);
+
+    if (Status == MCDisassembler::Fail)
+      return MCDisassembler::Fail;
+  }
+  KdStream << ".end_amdhsa_kernel\n";
+  outs() << KdStream.str();
+  return MCDisassembler::Success;
+}
+
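Note that decodeKernelDescriptor buffers the whole directive block in KdStream and only prints it once every chunk has decoded successfully, so a partially decoded descriptor never reaches the output. The result is a re-assemblable block roughly of the following shape; the kernel name and field values in this sketch are invented and the directive list is abridged, since the real output prints every directive derived from the descriptor bytes.

```cpp
// Illustrative shape of the text accumulated in KdStream for one kernel.
// Symbol name and values are made up; the real list is longer.
#include <iostream>

int main() {
  std::cout << R"(.amdhsa_kernel my_kernel
	.amdhsa_group_segment_fixed_size 0
	.amdhsa_private_segment_fixed_size 0
	.amdhsa_next_free_vgpr .amdgcn.next_free_vgpr
	.amdhsa_reserve_vcc 0
	.amdhsa_next_free_sgpr .amdgcn.next_free_sgpr
	.amdhsa_float_round_mode_32 0
	.amdhsa_user_sgpr_kernarg_segment_ptr 1
.end_amdhsa_kernel
)";
}
```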
+Optional<DecodeStatus>
+AMDGPUDisassembler::onSymbolStart(SymbolInfoTy &Symbol, uint64_t &Size,
+                                  ArrayRef<uint8_t> Bytes, uint64_t Address,
+                                  raw_ostream &CStream) const {
+  // Right now only the kernel descriptor needs to be handled.
+  // We ignore all other symbols for target-specific handling.
+  // TODO:
+  // Fix the spurious symbol issue for AMDGPU kernels. It exists for both Code
+  // Object V2 and V3.
+
+  // amd_kernel_code_t for Code Object V2.
+  if (Symbol.Type == ELF::STT_AMDGPU_HSA_KERNEL) {
+    // Right now this condition will always evaluate to false due to the
+    // above-mentioned issue.
+
+    Size = 256;
+    return MCDisassembler::SoftFail;
+  }
+
+  // Code Object V3 kernel descriptors.
+  StringRef Name = Symbol.Name;
+  if (Symbol.Type == ELF::STT_OBJECT && Name.endswith(StringRef(".kd"))) {
+    if (decodeKernelDescriptor(Name, Bytes, Size, Address) ==
+        MCDisassembler::Success) {
+      Size = Bytes.size();
+      return MCDisassembler::Success;
+    }
+    return MCDisassembler::Fail;
+  }
+  return None;
+}
+
 //===----------------------------------------------------------------------===//
 // AMDGPUSymbolizer
 //===----------------------------------------------------------------------===//
diff --git a/llvm/tools/llvm-objdump/llvm-objdump.cpp b/llvm/tools/llvm-objdump/llvm-objdump.cpp
--- a/llvm/tools/llvm-objdump/llvm-objdump.cpp
+++ b/llvm/tools/llvm-objdump/llvm-objdump.cpp
@@ -1396,23 +1396,6 @@
       outs() << SectionName << ":\n";
     }
 
-    if (Obj->isELF() && Obj->getArch() == Triple::amdgcn) {
-      if (Symbols[SI].Type == ELF::STT_AMDGPU_HSA_KERNEL) {
-        // skip amd_kernel_code_t at the begining of kernel symbol (256 bytes)
-        Start += 256;
-      }
-      if (SI == SE - 1 ||
-          Symbols[SI + 1].Type == ELF::STT_AMDGPU_HSA_KERNEL) {
-        // cut trailing zeroes at the end of kernel
-        // cut up to 256 bytes
-        const uint64_t EndAlign = 256;
-        const auto Limit = End - (std::min)(EndAlign, End - Start);
-        while (End > Limit &&
-               *reinterpret_cast<const support::ulittle32_t *>(&Bytes[End - 4]) == 0)
-          End -= 4;
-      }
-    }
-
     outs() << '\n';
     if (!NoLeadingAddr)
       outs() << format(Is64Bits ? "%016" PRIx64 " " : "%08" PRIx64 " ",
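With the onSymbolStart hook in place, the AMDGPU-specific carve-outs removed from llvm-objdump in the hunk above are no longer needed: a disassembly driver can simply ask the target disassembler first and act on its answer. The sketch below models that control flow with a simplified stand-in interface; it is not the real llvm-objdump code, and the names are invented for illustration.

```cpp
// Simplified model of how a driver can use onSymbolStart:
//   - no value       -> nothing target-specific, disassemble as usual
//   - Success        -> Size bytes were consumed (e.g. a kernel descriptor was
//                       printed as directives), skip past them
//   - Fail/SoftFail  -> dump the failed region as data and continue after it
#include <cstdint>
#include <optional>
#include <vector>

enum class DecodeStatus { Fail, SoftFail, Success };

struct FakeDisassembler {
  std::optional<DecodeStatus> onSymbolStart(uint64_t &Size,
                                            const std::vector<uint8_t> &Bytes) {
    (void)Bytes;
    Size = 0;
    return std::nullopt; // default: no target-specific handling
  }
};

void disassembleSymbol(FakeDisassembler &Dis,
                       const std::vector<uint8_t> &Bytes) {
  uint64_t Index = 0;
  uint64_t Size = 0;
  if (std::optional<DecodeStatus> St = Dis.onSymbolStart(Size, Bytes)) {
    if (*St == DecodeStatus::Success)
      Index += Size;              // already emitted as directives
    else
      Index += Size ? Size : 1;   // skip the region that failed to decode
  }
  // ... normal instruction decoding would continue at Index ...
  (void)Index;
}

int main() {
  FakeDisassembler Dis;
  disassembleSymbol(Dis, std::vector<uint8_t>(64, 0));
}
```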