Index: lldb/source/Plugins/Process/Linux/LinuxPTraceDefines_arm64sve.h
===================================================================
--- /dev/null
+++ lldb/source/Plugins/Process/Linux/LinuxPTraceDefines_arm64sve.h
@@ -0,0 +1,268 @@
+//===-- LinuxPTraceDefines_arm64sve.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// This file provides Linux-specific defines to enable compilation with SVE
+// ptrace support. These defines may later be removed once they get defined
+// in Procfs.h and Ptrace.h.
+
+// References in Linux kernel source:
+// 1) Documentation/arm64/sve.txt
+// 2) arch/arm64/include/uapi/asm/ptrace.h
+// 3) arch/arm64/include/uapi/asm/sve_context.h
+#ifdef INCLUDE_LINUX_PTRACE_DEFINITIONS_FOR_SVE_ARM64
+
+#ifndef lldb_LinuxPTraceDefines_arm64sve_h
+#define lldb_LinuxPTraceDefines_arm64sve_h
+
+#ifndef __ASSEMBLY__
+
+#include <asm/sigcontext.h>
+
+#define SVE_MAGIC 0x53564501
+
+struct sve_context {
+  struct _aarch64_ctx head;
+  __u16 vl;
+  __u16 __reserved[3];
+};
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * The SVE architecture leaves space for future expansion of the
+ * vector length beyond its initial architectural limit of 2048 bits
+ * (16 quadwords).
+ *
+ * See linux/Documentation/arm64/sve.txt for a description of the VL/VQ
+ * terminology.
+ */
+#define SVE_VQ_BYTES 16 /* number of bytes per quadword */
+
+#define SVE_VQ_MIN 1
+#define SVE_VQ_MAX 512
+
+#define SVE_VL_MIN (SVE_VQ_MIN * SVE_VQ_BYTES)
+#define SVE_VL_MAX (SVE_VQ_MAX * SVE_VQ_BYTES)
+
+#define SVE_NUM_ZREGS 32
+#define SVE_NUM_PREGS 16
+
+#define sve_vl_valid(vl)                                                       \
+  ((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
+#define sve_vq_from_vl(vl) ((vl) / SVE_VQ_BYTES)
+#define sve_vl_from_vq(vq) ((vq)*SVE_VQ_BYTES)
+
+/*
+ * If the SVE registers are currently live for the thread at signal delivery,
+ * sve_context.head.size >=
+ *   SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl))
+ * and the register data may be accessed using the SVE_SIG_*() macros.
+ *
+ * If sve_context.head.size <
+ *   SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve_context.vl)),
+ * the SVE registers were not live for the thread and no register data
+ * is included: in this case, the SVE_SIG_*() macros should not be
+ * used except for this check.
+ *
+ * The same convention applies when returning from a signal: a caller
+ * will need to remove or resize the sve_context block if it wants to
+ * make the SVE registers live when they were previously non-live or
+ * vice-versa. This may require the caller to allocate fresh
+ * memory and/or move other context blocks in the signal frame.
+ *
+ * Changing the vector length during signal return is not permitted:
+ * sve_context.vl must equal the thread's current vector length when
+ * doing a sigreturn.
+ *
+ * Note: for all these macros, the "vq" argument denotes the SVE
+ * vector length in quadwords (i.e., units of 128 bits).
+ *
+ * The correct way to obtain vq is to use sve_vq_from_vl(vl). The
+ * result is valid if and only if sve_vl_valid(vl) is true. This is
+ * guaranteed for a struct sve_context written by the kernel.
+ *
+ * Additional macros describe the contents and layout of the payload.
+ * For each, SVE_SIG_x_OFFSET(args) is the start offset relative to
+ * the start of struct sve_context, and SVE_SIG_x_SIZE(args) is the
+ * size in bytes:
+ *
+ *   x       type                              description
+ *   -       ----                              -----------
+ *   REGS                                      the entire SVE context
+ *
+ *   ZREGS   __uint128_t[SVE_NUM_ZREGS][vq]    all Z-registers
+ *   ZREG    __uint128_t[vq]                   individual Z-register Zn
+ *
+ *   PREGS   uint16_t[SVE_NUM_PREGS][vq]       all P-registers
+ *   PREG    uint16_t[vq]                      individual P-register Pn
+ *
+ *   FFR     uint16_t[vq]                      first-fault status register
+ *
+ * Additional data might be appended in the future.
+ */
+
+#define SVE_SIG_ZREG_SIZE(vq) ((__u32)(vq)*SVE_VQ_BYTES)
+#define SVE_SIG_PREG_SIZE(vq) ((__u32)(vq) * (SVE_VQ_BYTES / 8))
+#define SVE_SIG_FFR_SIZE(vq) SVE_SIG_PREG_SIZE(vq)
+
+#define SVE_SIG_REGS_OFFSET                                                    \
+  ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) / SVE_VQ_BYTES *          \
+   SVE_VQ_BYTES)
+
+#define SVE_SIG_ZREGS_OFFSET SVE_SIG_REGS_OFFSET
+#define SVE_SIG_ZREG_OFFSET(vq, n)                                             \
+  (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n))
+#define SVE_SIG_ZREGS_SIZE(vq)                                                 \
+  (SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET)
+
+#define SVE_SIG_PREGS_OFFSET(vq) (SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq))
+#define SVE_SIG_PREG_OFFSET(vq, n)                                             \
+  (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n))
+#define SVE_SIG_PREGS_SIZE(vq)                                                 \
+  (SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq))
+
+#define SVE_SIG_FFR_OFFSET(vq)                                                 \
+  (SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq))
+
+#define SVE_SIG_REGS_SIZE(vq)                                                  \
+  (SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET)
+
+#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
+
+/* SVE/FP/SIMD state (NT_ARM_SVE) */
+
+struct user_sve_header {
+  __u32 size;     /* total meaningful regset content in bytes */
+  __u32 max_size; /* maximum possible size for this thread */
+  __u16 vl;       /* current vector length */
+  __u16 max_vl;   /* maximum possible vector length */
+  __u16 flags;
+  __u16 __reserved;
+};
+
+/* Definitions for user_sve_header.flags: */
+#define SVE_PT_REGS_MASK (1 << 0)
+
+#define SVE_PT_REGS_FPSIMD 0
+#define SVE_PT_REGS_SVE SVE_PT_REGS_MASK
+
+/*
+ * Common SVE_PT_* flags:
+ * These must be kept in sync with the prctl interface in <linux/prctl.h>.
+ */
+#define SVE_PT_VL_INHERIT (PR_SVE_VL_INHERIT >> 16)
+#define SVE_PT_VL_ONEXEC (PR_SVE_SET_VL_ONEXEC >> 16)
+
+/*
+ * The remainder of the SVE state follows struct user_sve_header. The
+ * total size of the SVE state (including header) depends on the
+ * metadata in the header: SVE_PT_SIZE(vq, flags) gives the total size
+ * of the state in bytes, including the header.
+ *
+ * Refer to the sve_context comment above for details of how to pass the
+ * correct "vq" argument to these macros.
+ */
+
+/* Offset from the start of struct user_sve_header to the register data */
+#define SVE_PT_REGS_OFFSET                                                     \
+  ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) / SVE_VQ_BYTES *          \
+   SVE_VQ_BYTES)
+
+/*
+ * The register data content and layout depend on the value of the
+ * flags field.
+ */
+
+/*
+ * (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD case:
+ *
+ * The payload starts at offset SVE_PT_FPSIMD_OFFSET, and is of type
+ * struct user_fpsimd_state. Additional data might be appended in the
+ * future: use SVE_PT_FPSIMD_SIZE(vq, flags) to compute the total size.
+ * SVE_PT_FPSIMD_SIZE(vq, flags) will never be less than
+ * sizeof(struct user_fpsimd_state).
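+ *
+ * (Illustration, not part of the kernel ABI text: on AArch64 Linux,
+ * struct user_fpsimd_state holds 32 x 16-byte V registers followed by the
+ * 32-bit FPSR and FPCR and 8 reserved bytes, i.e. 528 bytes in total, so
+ * the FPSIMD-form payload size does not depend on vq.)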
+ */ + +#define SVE_PT_FPSIMD_OFFSET SVE_PT_REGS_OFFSET + +#define SVE_PT_FPSIMD_SIZE(vq, flags) (sizeof(struct user_fpsimd_state)) + +/* + * (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE case: + * + * The payload starts at offset SVE_PT_SVE_OFFSET, and is of size + * SVE_PT_SVE_SIZE(vq, flags). + * + * Additional macros describe the contents and layout of the payload. + * For each, SVE_PT_SVE_x_OFFSET(args) is the start offset relative to + * the start of struct user_sve_header, and SVE_PT_SVE_x_SIZE(args) is + * the size in bytes: + * + * x type description + * - ---- ----------- + * ZREGS \ + * ZREG | + * PREGS | refer to + * PREG | + * FFR / + * + * FPSR uint32_t FPSR + * FPCR uint32_t FPCR + * + * Additional data might be appended in the future. + */ + +#define SVE_PT_SVE_ZREG_SIZE(vq) SVE_SIG_ZREG_SIZE(vq) +#define SVE_PT_SVE_PREG_SIZE(vq) SVE_SIG_PREG_SIZE(vq) +#define SVE_PT_SVE_FFR_SIZE(vq) SVE_SIG_FFR_SIZE(vq) +#define SVE_PT_SVE_FPSR_SIZE sizeof(__u32) +#define SVE_PT_SVE_FPCR_SIZE sizeof(__u32) + +#define __SVE_SIG_TO_PT(offset) \ + ((offset)-SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET) + +#define SVE_PT_SVE_OFFSET SVE_PT_REGS_OFFSET + +#define SVE_PT_SVE_ZREGS_OFFSET __SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET) +#define SVE_PT_SVE_ZREG_OFFSET(vq, n) \ + __SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n)) +#define SVE_PT_SVE_ZREGS_SIZE(vq) \ + (SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET) + +#define SVE_PT_SVE_PREGS_OFFSET(vq) __SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq)) +#define SVE_PT_SVE_PREG_OFFSET(vq, n) \ + __SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n)) +#define SVE_PT_SVE_PREGS_SIZE(vq) \ + (SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_PT_SVE_PREGS_OFFSET(vq)) + +#define SVE_PT_SVE_FFR_OFFSET(vq) __SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq)) + +#define SVE_PT_SVE_FPSR_OFFSET(vq) \ + ((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + \ + (SVE_VQ_BYTES - 1)) / \ + SVE_VQ_BYTES * SVE_VQ_BYTES) +#define SVE_PT_SVE_FPCR_OFFSET(vq) \ + (SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE) + +/* + * Any future extension appended after FPCR must be aligned to the next + * 128-bit boundary. + */ + +#define SVE_PT_SVE_SIZE(vq, flags) \ + ((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE - SVE_PT_SVE_OFFSET + \ + (SVE_VQ_BYTES - 1)) / \ + SVE_VQ_BYTES * SVE_VQ_BYTES) + +#define SVE_PT_SIZE(vq, flags) \ + (((flags)&SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE \ + ? 
SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \ + : SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags)) + +#endif +#endif Index: lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux.h =================================================================== --- lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux.h +++ lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux.h @@ -31,6 +31,9 @@ // Invalidates cached values in register context data structures virtual void InvalidateAllRegisters(){} + // Configures register context based on target capabilities + virtual void ConfigureRegisterContext() {} + protected: lldb::ByteOrder GetByteOrder() const; Index: lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.h =================================================================== --- lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.h +++ lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.h @@ -14,11 +14,25 @@ #include "Plugins/Process/Linux/NativeRegisterContextLinux.h" #include "Plugins/Process/Utility/lldb-arm64-register-enums.h" +#include + +#ifndef SVE_PT_REGS_SVE +#define INCLUDE_LINUX_PTRACE_DEFINITIONS_FOR_SVE_ARM64 +#include "Plugins/Process/Linux/LinuxPTraceDefines_arm64sve.h" +#endif + namespace lldb_private { namespace process_linux { class NativeProcessLinux; +enum class SVE_STATE { + SVE_STATE_UNKNOWN, + SVE_STATE_DISABLED, + SVE_STATE_FPSIMD, + SVE_STATE_FULL +}; + class NativeRegisterContextLinux_arm64 : public NativeRegisterContextLinux { public: NativeRegisterContextLinux_arm64(const ArchSpec &target_arch, @@ -28,6 +42,8 @@ uint32_t GetUserRegisterCount() const override; + uint32_t GetNativeRegisterIndex(uint32_t reg_index) const override; + const RegisterSet *GetRegisterSet(uint32_t set_index) const override; Status ReadRegister(const RegisterInfo *reg_info, @@ -42,6 +58,8 @@ void InvalidateAllRegisters() override; + void ConfigureRegisterContext() override; + // Hardware breakpoints/watchpoint management functions uint32_t NumSupportedHardwareBreakpoints() override; @@ -88,17 +106,34 @@ Status WriteFPR() override; + Status ReadAllSVE(); + + Status WriteAllSVE(); + + Status ReadSVEHeader(); + + Status WriteSVEHeader(); + void *GetGPRBuffer() override { return &m_gpr_arm64; } void *GetFPRBuffer() override { return &m_fpr; } size_t GetFPRSize() override { return sizeof(m_fpr); } + void *GetSVEHeader() { return &m_sve_header; } + + void *GetSVEBuffer(); + + size_t GetSVEHeaderSize() { return sizeof(m_sve_header); } + + size_t GetSVEBufferSize() { return m_sve_ptrace_payload.size(); } + private: struct RegInfo { uint32_t num_registers; uint32_t num_gpr_registers; uint32_t num_fpr_registers; + uint32_t num_sve_registers; uint32_t last_gpr; uint32_t first_fpr; @@ -107,6 +142,9 @@ uint32_t first_fpr_v; uint32_t last_fpr_v; + uint32_t first_sve; + uint32_t last_sve; + uint32_t gpr_flags; }; @@ -131,11 +169,20 @@ bool m_gpr_is_valid; bool m_fpu_is_valid; + bool m_sve_buffer_is_valid; + + bool m_sve_header_is_valid; + bool m_sve_update_reg_infos; GPR m_gpr_arm64; // 64-bit general purpose registers. + RegInfo m_reg_info; FPU m_fpr; // floating-point registers including extended register sets. + mutable SVE_STATE m_sve_state; + struct user_sve_header m_sve_header; + std::vector m_sve_ptrace_payload; + // Debug register info for hardware breakpoints and watchpoints management. struct DREG { lldb::addr_t address; // Breakpoint/watchpoint address value. 
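// Reviewer sketch, not part of this patch: how the new m_sve_state member is
// derived from the NT_ARM_SVE header that ConfigureRegisterContext() reads in
// the .cpp changes further down. The helper name ClassifySVEState is
// hypothetical; it assumes the user_sve_header, SVE_PT_REGS_* and
// sve_vl_valid definitions pulled in via the includes added to this header.
//
//   static SVE_STATE ClassifySVEState(const user_sve_header &hdr) {
//     if (!sve_vl_valid(hdr.vl))
//       return SVE_STATE::SVE_STATE_DISABLED; // no usable SVE regset
//     if ((hdr.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
//       return SVE_STATE::SVE_STATE_FPSIMD; // payload mirrors user_fpsimd_state
//     if ((hdr.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE)
//       return SVE_STATE::SVE_STATE_FULL; // full Z/P/FFR payload
//     return SVE_STATE::SVE_STATE_UNKNOWN; // header not read yet
//   }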
@@ -157,11 +204,25 @@ bool IsFPR(unsigned reg) const; + bool IsSVE(unsigned reg) const; + + bool IsSVEZReg(unsigned reg) const; + + bool IsSVEPReg(unsigned reg) const; + + bool IsSVERegVG(unsigned reg) const; + + uint64_t GetSVERegVG() { return m_sve_header.vl / 8; } + + void SetSVERegVG(uint64_t vg) { m_sve_header.vl = vg * 8; } + Status ReadHardwareDebugInfo(); Status WriteHardwareDebugRegs(int hwbType); uint32_t CalculateFprOffset(const RegisterInfo *reg_info) const; + + uint32_t CalculateSVEOffset(uint32_t reg_num) const; }; } // namespace process_linux Index: lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.cpp =================================================================== --- lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.cpp +++ lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.cpp @@ -28,8 +28,10 @@ #include // NT_PRSTATUS and NT_FPREGSET definition #include -// user_hwdebug_state definition -#include + +#ifndef NT_ARM_SVE +#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension */ +#endif #define REG_CONTEXT_SIZE (GetGPRSize() + GetFPRSize()) @@ -95,9 +97,34 @@ 1) == k_num_fpr_registers_arm64, "g_fpu_regnums_arm64 has wrong number of register infos"); +// ARM64 SVE registers. +static const uint32_t g_sve_regnums_arm64[] = { + sve_vg_arm64, + + sve_z0_arm64, sve_z1_arm64, sve_z2_arm64, sve_z3_arm64, + sve_z4_arm64, sve_z5_arm64, sve_z6_arm64, sve_z7_arm64, + sve_z8_arm64, sve_z9_arm64, sve_z10_arm64, sve_z11_arm64, + sve_z12_arm64, sve_z13_arm64, sve_z14_arm64, sve_z15_arm64, + sve_z16_arm64, sve_z17_arm64, sve_z18_arm64, sve_z19_arm64, + sve_z20_arm64, sve_z21_arm64, sve_z22_arm64, sve_z23_arm64, + sve_z24_arm64, sve_z25_arm64, sve_z26_arm64, sve_z27_arm64, + sve_z28_arm64, sve_z29_arm64, sve_z30_arm64, sve_z31_arm64, + + sve_p0_arm64, sve_p1_arm64, sve_p2_arm64, sve_p3_arm64, + sve_p4_arm64, sve_p5_arm64, sve_p6_arm64, sve_p7_arm64, + sve_p8_arm64, sve_p9_arm64, sve_p10_arm64, sve_p11_arm64, + sve_p12_arm64, sve_p13_arm64, sve_p14_arm64, sve_p15_arm64, + + sve_ffr_arm64, + LLDB_INVALID_REGNUM // register sets need to end with this flag +}; +static_assert(((sizeof g_sve_regnums_arm64 / sizeof g_sve_regnums_arm64[0]) - + 1) == k_num_sve_registers_arm64, + "g_sve_regnums_arm64 has wrong number of register infos"); + namespace { // Number of register sets provided by this context. -enum { k_num_register_sets = 2 }; +enum { k_num_register_sets = 3 }; } // Register sets for ARM64. 
@@ -105,7 +132,8 @@ {"General Purpose Registers", "gpr", k_num_gpr_registers_arm64, g_gpr_regnums_arm64}, {"Floating Point Registers", "fpu", k_num_fpr_registers_arm64, - g_fpu_regnums_arm64}}; + g_fpu_regnums_arm64}, + {"SVE Registers", "sve", k_num_sve_registers_arm64, g_sve_regnums_arm64}}; std::unique_ptr NativeRegisterContextLinux::CreateHostNativeRegisterContextLinux( @@ -131,12 +159,16 @@ m_reg_info.num_registers = k_num_registers_arm64; m_reg_info.num_gpr_registers = k_num_gpr_registers_arm64; m_reg_info.num_fpr_registers = k_num_fpr_registers_arm64; + m_reg_info.num_sve_registers = k_num_sve_registers_arm64; + m_reg_info.last_gpr = k_last_gpr_arm64; m_reg_info.first_fpr = k_first_fpr_arm64; m_reg_info.last_fpr = k_last_fpr_arm64; m_reg_info.first_fpr_v = fpu_v0_arm64; m_reg_info.last_fpr_v = fpu_v31_arm64; m_reg_info.gpr_flags = gpr_cpsr_arm64; + m_reg_info.first_sve = sve_vg_arm64; + m_reg_info.last_sve = sve_ffr_arm64; break; default: llvm_unreachable("Unhandled target architecture."); @@ -147,6 +179,7 @@ ::memset(&m_gpr_arm64, 0, sizeof(m_gpr_arm64)); ::memset(&m_hwp_regs, 0, sizeof(m_hwp_regs)); ::memset(&m_hbr_regs, 0, sizeof(m_hbr_regs)); + ::memset(&m_sve_header, 0, sizeof(m_sve_header)); // 16 is just a maximum value, query hardware for actual watchpoint count m_max_hwp_supported = 16; @@ -155,25 +188,58 @@ m_gpr_is_valid = false; m_fpu_is_valid = false; + m_sve_buffer_is_valid = false; + + m_sve_header_is_valid = false; + m_sve_update_reg_infos = true; + + // SVE is not enabled until we query user_sve_header + m_sve_state = SVE_STATE::SVE_STATE_UNKNOWN; } uint32_t NativeRegisterContextLinux_arm64::GetRegisterSetCount() const { - return k_num_register_sets; + if (m_sve_state == SVE_STATE::SVE_STATE_FPSIMD || + m_sve_state == SVE_STATE::SVE_STATE_FULL) + + return k_num_register_sets; + else + return k_num_register_sets - 1; } const RegisterSet * NativeRegisterContextLinux_arm64::GetRegisterSet(uint32_t set_index) const { - if (set_index < k_num_register_sets) + if (set_index < GetRegisterSetCount()) return &g_reg_sets_arm64[set_index]; return nullptr; } uint32_t NativeRegisterContextLinux_arm64::GetUserRegisterCount() const { - uint32_t count = 0; - for (uint32_t set_index = 0; set_index < k_num_register_sets; ++set_index) - count += g_reg_sets_arm64[set_index].num_registers; - return count; + if (m_sve_state == SVE_STATE::SVE_STATE_FPSIMD || + m_sve_state == SVE_STATE::SVE_STATE_FULL) + return k_num_gpr_registers_arm64 + k_num_fpr_registers_arm64 + + k_num_sve_registers_arm64; + + return k_num_gpr_registers_arm64 + k_num_fpr_registers_arm64; +} + +uint32_t NativeRegisterContextLinux_arm64::GetNativeRegisterIndex( + uint32_t reg_index) const { + if (reg_index < m_reg_info.num_gpr_registers) + return g_reg_sets_arm64[0].registers[reg_index]; + + reg_index -= m_reg_info.num_gpr_registers; + if (reg_index < m_reg_info.num_fpr_registers) + return g_reg_sets_arm64[1].registers[reg_index]; + + if (m_sve_state == SVE_STATE::SVE_STATE_FPSIMD || + m_sve_state == SVE_STATE::SVE_STATE_FULL) { + reg_index -= m_reg_info.num_fpr_registers; + if (reg_index < m_reg_info.num_sve_registers) + return g_reg_sets_arm64[2].registers[reg_index]; + } + + return LLDB_INVALID_REGNUM; } Status @@ -195,6 +261,8 @@ uint8_t *src; uint32_t offset; + uint64_t sve_vg; + std::vector sve_reg_non_live; if (IsGPR(reg)) { if (!m_gpr_is_valid) { @@ -208,15 +276,70 @@ src = (uint8_t *)GetGPRBuffer() + offset; } else if (IsFPR(reg)) { - if (!m_fpu_is_valid) { + if (m_sve_state == SVE_STATE::SVE_STATE_DISABLED) { + 
// SVE is disabled take legacy route for FPU register access + if (!m_fpu_is_valid) { - error = ReadFPR(); - if (error.Fail()) - return error; + error = ReadFPR(); + if (error.Fail()) + return error; + } + offset = CalculateFprOffset(reg_info); + assert(offset < GetFPRSize()); + src = (uint8_t *)GetFPRBuffer() + offset; + } else { + // SVE enabled, we will read and cache SVE ptrace data + if (!m_sve_buffer_is_valid) { + error = ReadAllSVE(); + if (error.Fail()) + return error; + } + // Extract SVE Z register value register number for this reg_info + uint32_t sve_reg_num = LLDB_INVALID_REGNUM; + if (reg_info->value_regs && + reg_info->value_regs[0] != LLDB_INVALID_REGNUM) + sve_reg_num = reg_info->value_regs[0]; + else if (reg == fpu_fpcr_arm64 || reg == fpu_fpsr_arm64) + sve_reg_num = reg; + if (sve_reg_num != LLDB_INVALID_REGNUM) { + offset = CalculateSVEOffset(sve_reg_num); + assert(offset < GetSVEBufferSize()); + src = (uint8_t *)GetSVEBuffer() + offset; + } + } + } else if (IsSVERegVG(reg)) { + + sve_vg = GetSVERegVG(); + src = (uint8_t *)&sve_vg; + + } else if (IsSVE(reg)) { + if (m_sve_state == SVE_STATE::SVE_STATE_DISABLED) { + return Status("SVE disabled or not supported"); + } else { + // SVE enabled, we will read and cache SVE ptrace data + if (!m_sve_buffer_is_valid) { + error = ReadAllSVE(); + if (error.Fail()) + return error; + } + if (m_sve_state == SVE_STATE::SVE_STATE_FPSIMD) { + sve_reg_non_live.resize(reg_info->byte_size, 0); + // In FPSIMD state SVE payload mirrors legacy fpsimd struct and so just + // copy 16 bytes of v register to the start of z register. All other + // SVE register will be set to zero. + if (IsSVEZReg(reg)) { + offset = CalculateSVEOffset(reg); + assert(offset < GetSVEBufferSize()); + ::memcpy(sve_reg_non_live.data(), (uint8_t *)GetSVEBuffer() + offset, + 16); + } + src = sve_reg_non_live.data(); + } else if (m_sve_state == SVE_STATE::SVE_STATE_FULL) { + offset = CalculateSVEOffset(reg); + assert(offset < GetSVEBufferSize()); + src = (uint8_t *)GetSVEBuffer() + offset; + } } - offset = CalculateFprOffset(reg_info); - assert(offset < GetFPRSize()); - src = (uint8_t *)GetFPRBuffer() + offset; } else return Status("failed - register wasn't recognized to be a GPR or an FPR, " "write strategy unknown"); @@ -243,6 +366,7 @@ uint8_t *dst; uint32_t offset; + std::vector sve_reg_non_live; if (IsGPR(reg)) { if (!m_gpr_is_valid) { @@ -259,20 +383,96 @@ return WriteGPR(); } else if (IsFPR(reg)) { - if (!m_fpu_is_valid) { - error = ReadFPR(); - if (error.Fail()) - return error; + if (m_sve_state == SVE_STATE::SVE_STATE_DISABLED) { + // SVE is disabled take legacy route for FPU register access + if (!m_fpu_is_valid) { + error = ReadFPR(); + if (error.Fail()) + return error; + } + offset = CalculateFprOffset(reg_info); + assert(offset < GetFPRSize()); + dst = (uint8_t *)GetFPRBuffer() + offset; + + ::memcpy(dst, reg_value.GetBytes(), reg_info->byte_size); + + return WriteFPR(); + } else { + // SVE enabled, we will read and cache SVE ptrace data + if (!m_sve_buffer_is_valid) { + error = ReadAllSVE(); + if (error.Fail()) + return error; + } + // Extract SVE Z register value register number for this reg_info + uint32_t sve_reg_num = LLDB_INVALID_REGNUM; + if (reg_info->value_regs && + reg_info->value_regs[0] != LLDB_INVALID_REGNUM) + sve_reg_num = reg_info->value_regs[0]; + else if (reg == fpu_fpcr_arm64 || reg == fpu_fpsr_arm64) + sve_reg_num = reg; + if (sve_reg_num != LLDB_INVALID_REGNUM) { + offset = CalculateSVEOffset(sve_reg_num); + assert(offset < 
GetSVEBufferSize()); + dst = (uint8_t *)GetSVEBuffer() + offset; + ::memcpy(dst, reg_value.GetBytes(), reg_info->byte_size); + return WriteAllSVE(); + } } - offset = CalculateFprOffset(reg_info); - assert(offset < GetFPRSize()); - dst = (uint8_t *)GetFPRBuffer() + offset; - - ::memcpy(dst, reg_value.GetBytes(), reg_info->byte_size); + } else if (IsSVERegVG(reg)) { + return Status("SVE state change operation not supported"); + } else if (IsSVE(reg)) { + if (m_sve_state == SVE_STATE::SVE_STATE_DISABLED) { + return Status("SVE disabled or not supported"); + } else { + // Target has SVE enabled, we will read and cache SVE ptrace data + if (!m_sve_buffer_is_valid) { + error = ReadAllSVE(); + if (error.Fail()) + return error; + } - return WriteFPR(); + // If target supports SVE but currently in FPSIMD mode. + if (m_sve_state == SVE_STATE::SVE_STATE_FPSIMD) { + // Here we will check if writing this SVE register enables + // SVE_STATE_FULL + bool set_sve_state_full = false; + const uint8_t *reg_bytes = (const uint8_t *)reg_value.GetBytes(); + if (IsSVEZReg(reg)) { + for (uint32_t i = 16; i < reg_info->byte_size; i++) { + if (reg_bytes[i]) { + set_sve_state_full = true; + break; + } + } + } else if (IsSVEPReg(reg) || reg == sve_ffr_arm64) { + for (uint32_t i = 0; i < reg_info->byte_size; i++) { + if (reg_bytes[i]) { + set_sve_state_full = true; + break; + } + } + } + if (set_sve_state_full) { + return Status("SVE state change operation not supported"); + } else if (!set_sve_state_full && IsSVEZReg(reg)) { + // We are writing a Z register which is zero beyond 16 bytes so copy + // first 16 bytes only as SVE payload mirrors legacy fpsimd structure + offset = CalculateSVEOffset(reg); + assert(offset < GetSVEBufferSize()); + dst = (uint8_t *)GetSVEBuffer() + offset; + ::memcpy(dst, reg_value.GetBytes(), 16); + return WriteAllSVE(); + } + } else if (m_sve_state == SVE_STATE::SVE_STATE_FULL) { + offset = CalculateSVEOffset(reg); + assert(offset < GetSVEBufferSize()); + dst = (uint8_t *)GetSVEBuffer() + offset; + ::memcpy(dst, reg_value.GetBytes(), reg_info->byte_size); + return WriteAllSVE(); + } + } } - return error; } @@ -351,6 +551,22 @@ return (m_reg_info.first_fpr <= reg && reg <= m_reg_info.last_fpr); } +bool NativeRegisterContextLinux_arm64::IsSVE(unsigned reg) const { + return (m_reg_info.first_sve <= reg && reg <= m_reg_info.last_sve); +} + +bool NativeRegisterContextLinux_arm64::IsSVEZReg(unsigned reg) const { + return (sve_z0_arm64 <= reg && reg <= sve_z31_arm64); +} + +bool NativeRegisterContextLinux_arm64::IsSVEPReg(unsigned reg) const { + return (sve_p0_arm64 <= reg && reg <= sve_p15_arm64); +} + +bool NativeRegisterContextLinux_arm64::IsSVERegVG(unsigned reg) const { + return (m_reg_info.first_sve == reg); +} + uint32_t NativeRegisterContextLinux_arm64::NumSupportedHardwareBreakpoints() { Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_BREAKPOINTS)); @@ -710,198 +926,316 @@ default: return 0; } -} -bool NativeRegisterContextLinux_arm64::WatchpointIsEnabled(uint32_t wp_index) { - Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_WATCHPOINTS)); - LLDB_LOG(log, "wp_index: {0}", wp_index); + } + bool + NativeRegisterContextLinux_arm64::WatchpointIsEnabled(uint32_t wp_index) { + Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_WATCHPOINTS)); + LLDB_LOG(log, "wp_index: {0}", wp_index); - if ((m_hwp_regs[wp_index].control & 0x1) == 0x1) - return true; - else - return false; -} + if ((m_hwp_regs[wp_index].control & 0x1) == 0x1) + return true; + else + return 
false; + } -Status NativeRegisterContextLinux_arm64::GetWatchpointHitIndex( - uint32_t &wp_index, lldb::addr_t trap_addr) { - Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_WATCHPOINTS)); - LLDB_LOG(log, "wp_index: {0}, trap_addr: {1:x}", wp_index, trap_addr); + Status NativeRegisterContextLinux_arm64::GetWatchpointHitIndex( + uint32_t &wp_index, lldb::addr_t trap_addr) { + Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_WATCHPOINTS)); + LLDB_LOG(log, "wp_index: {0}, trap_addr: {1:x}", wp_index, trap_addr); - uint32_t watch_size; - lldb::addr_t watch_addr; + uint32_t watch_size; + lldb::addr_t watch_addr; - for (wp_index = 0; wp_index < m_max_hwp_supported; ++wp_index) { - watch_size = GetWatchpointSize(wp_index); - watch_addr = m_hwp_regs[wp_index].address; + for (wp_index = 0; wp_index < m_max_hwp_supported; ++wp_index) { + watch_size = GetWatchpointSize(wp_index); + watch_addr = m_hwp_regs[wp_index].address; - if (WatchpointIsEnabled(wp_index) && trap_addr >= watch_addr && - trap_addr < watch_addr + watch_size) { - m_hwp_regs[wp_index].hit_addr = trap_addr; - return Status(); + if (WatchpointIsEnabled(wp_index) && trap_addr >= watch_addr && + trap_addr < watch_addr + watch_size) { + m_hwp_regs[wp_index].hit_addr = trap_addr; + return Status(); + } } + + wp_index = LLDB_INVALID_INDEX32; + return Status(); } - wp_index = LLDB_INVALID_INDEX32; - return Status(); -} + lldb::addr_t + NativeRegisterContextLinux_arm64::GetWatchpointAddress(uint32_t wp_index) { + Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_WATCHPOINTS)); + LLDB_LOG(log, "wp_index: {0}", wp_index); -lldb::addr_t -NativeRegisterContextLinux_arm64::GetWatchpointAddress(uint32_t wp_index) { - Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_WATCHPOINTS)); - LLDB_LOG(log, "wp_index: {0}", wp_index); + if (wp_index >= m_max_hwp_supported) + return LLDB_INVALID_ADDRESS; - if (wp_index >= m_max_hwp_supported) - return LLDB_INVALID_ADDRESS; + if (WatchpointIsEnabled(wp_index)) + return m_hwp_regs[wp_index].real_addr; + else + return LLDB_INVALID_ADDRESS; + } - if (WatchpointIsEnabled(wp_index)) - return m_hwp_regs[wp_index].real_addr; - else - return LLDB_INVALID_ADDRESS; -} + lldb::addr_t + NativeRegisterContextLinux_arm64::GetWatchpointHitAddress(uint32_t wp_index) { + Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_WATCHPOINTS)); + LLDB_LOG(log, "wp_index: {0}", wp_index); -lldb::addr_t -NativeRegisterContextLinux_arm64::GetWatchpointHitAddress(uint32_t wp_index) { - Log *log(ProcessPOSIXLog::GetLogIfAllCategoriesSet(POSIX_LOG_WATCHPOINTS)); - LLDB_LOG(log, "wp_index: {0}", wp_index); + if (wp_index >= m_max_hwp_supported) + return LLDB_INVALID_ADDRESS; - if (wp_index >= m_max_hwp_supported) - return LLDB_INVALID_ADDRESS; + if (WatchpointIsEnabled(wp_index)) + return m_hwp_regs[wp_index].hit_addr; + else + return LLDB_INVALID_ADDRESS; + } - if (WatchpointIsEnabled(wp_index)) - return m_hwp_regs[wp_index].hit_addr; - else - return LLDB_INVALID_ADDRESS; -} + Status NativeRegisterContextLinux_arm64::ReadHardwareDebugInfo() { + if (!m_refresh_hwdebug_info) { + return Status(); + } -Status NativeRegisterContextLinux_arm64::ReadHardwareDebugInfo() { - if (!m_refresh_hwdebug_info) { - return Status(); - } + ::pid_t tid = m_thread.GetID(); - ::pid_t tid = m_thread.GetID(); + int regset = NT_ARM_HW_WATCH; + struct iovec ioVec; + struct user_hwdebug_state dreg_state; + Status error; - int regset = NT_ARM_HW_WATCH; - struct iovec ioVec; - struct user_hwdebug_state dreg_state; 
- Status error; + ioVec.iov_base = &dreg_state; + ioVec.iov_len = sizeof(dreg_state); + error = NativeProcessLinux::PtraceWrapper(PTRACE_GETREGSET, tid, ®set, + &ioVec, ioVec.iov_len); - ioVec.iov_base = &dreg_state; - ioVec.iov_len = sizeof(dreg_state); - error = NativeProcessLinux::PtraceWrapper(PTRACE_GETREGSET, tid, ®set, - &ioVec, ioVec.iov_len); + if (error.Fail()) + return error; + + m_max_hwp_supported = dreg_state.dbg_info & 0xff; + + regset = NT_ARM_HW_BREAK; + error = NativeProcessLinux::PtraceWrapper(PTRACE_GETREGSET, tid, ®set, + &ioVec, ioVec.iov_len); + + if (error.Fail()) + return error; + + m_max_hbp_supported = dreg_state.dbg_info & 0xff; + m_refresh_hwdebug_info = false; - if (error.Fail()) return error; + } - m_max_hwp_supported = dreg_state.dbg_info & 0xff; + Status NativeRegisterContextLinux_arm64::WriteHardwareDebugRegs(int hwbType) { + struct iovec ioVec; + struct user_hwdebug_state dreg_state; + Status error; - regset = NT_ARM_HW_BREAK; - error = NativeProcessLinux::PtraceWrapper(PTRACE_GETREGSET, tid, ®set, - &ioVec, ioVec.iov_len); + memset(&dreg_state, 0, sizeof(dreg_state)); + ioVec.iov_base = &dreg_state; + + if (hwbType == eDREGTypeWATCH) { + hwbType = NT_ARM_HW_WATCH; + ioVec.iov_len = sizeof(dreg_state.dbg_info) + sizeof(dreg_state.pad) + + (sizeof(dreg_state.dbg_regs[0]) * m_max_hwp_supported); + + for (uint32_t i = 0; i < m_max_hwp_supported; i++) { + dreg_state.dbg_regs[i].addr = m_hwp_regs[i].address; + dreg_state.dbg_regs[i].ctrl = m_hwp_regs[i].control; + } + } else { + hwbType = NT_ARM_HW_BREAK; + ioVec.iov_len = sizeof(dreg_state.dbg_info) + sizeof(dreg_state.pad) + + (sizeof(dreg_state.dbg_regs[0]) * m_max_hbp_supported); + + for (uint32_t i = 0; i < m_max_hbp_supported; i++) { + dreg_state.dbg_regs[i].addr = m_hbr_regs[i].address; + dreg_state.dbg_regs[i].ctrl = m_hbr_regs[i].control; + } + } + + return NativeProcessLinux::PtraceWrapper(PTRACE_SETREGSET, m_thread.GetID(), + &hwbType, &ioVec, ioVec.iov_len); + } + + Status NativeRegisterContextLinux_arm64::ReadGPR() { + Status error; + + struct iovec ioVec; + + ioVec.iov_base = GetGPRBuffer(); + ioVec.iov_len = GetGPRSize(); + + error = ReadRegisterSet(&ioVec, GetGPRSize(), NT_PRSTATUS); + + if (error.Success()) + m_gpr_is_valid = true; - if (error.Fail()) return error; + } - m_max_hbp_supported = dreg_state.dbg_info & 0xff; - m_refresh_hwdebug_info = false; + Status NativeRegisterContextLinux_arm64::WriteGPR() { + struct iovec ioVec; - return error; -} + m_gpr_is_valid = false; -Status NativeRegisterContextLinux_arm64::WriteHardwareDebugRegs(int hwbType) { - struct iovec ioVec; - struct user_hwdebug_state dreg_state; - Status error; + ioVec.iov_base = GetGPRBuffer(); + ioVec.iov_len = GetGPRSize(); - memset(&dreg_state, 0, sizeof(dreg_state)); - ioVec.iov_base = &dreg_state; + return WriteRegisterSet(&ioVec, GetGPRSize(), NT_PRSTATUS); + } - if (hwbType == eDREGTypeWATCH) { - hwbType = NT_ARM_HW_WATCH; - ioVec.iov_len = sizeof(dreg_state.dbg_info) + sizeof(dreg_state.pad) + - (sizeof(dreg_state.dbg_regs[0]) * m_max_hwp_supported); + Status NativeRegisterContextLinux_arm64::ReadFPR() { + Status error; - for (uint32_t i = 0; i < m_max_hwp_supported; i++) { - dreg_state.dbg_regs[i].addr = m_hwp_regs[i].address; - dreg_state.dbg_regs[i].ctrl = m_hwp_regs[i].control; - } - } else { - hwbType = NT_ARM_HW_BREAK; - ioVec.iov_len = sizeof(dreg_state.dbg_info) + sizeof(dreg_state.pad) + - (sizeof(dreg_state.dbg_regs[0]) * m_max_hbp_supported); - - for (uint32_t i = 0; i < m_max_hbp_supported; i++) { - 
dreg_state.dbg_regs[i].addr = m_hbr_regs[i].address; - dreg_state.dbg_regs[i].ctrl = m_hbr_regs[i].control; - } + struct iovec ioVec; + + ioVec.iov_base = GetFPRBuffer(); + ioVec.iov_len = GetFPRSize(); + + error = ReadRegisterSet(&ioVec, GetFPRSize(), NT_FPREGSET); + + if (error.Success()) + m_fpu_is_valid = true; + + return error; } - return NativeProcessLinux::PtraceWrapper(PTRACE_SETREGSET, m_thread.GetID(), - &hwbType, &ioVec, ioVec.iov_len); -} + Status NativeRegisterContextLinux_arm64::WriteFPR() { + struct iovec ioVec; -Status NativeRegisterContextLinux_arm64::ReadGPR() { - Status error; + m_fpu_is_valid = false; - struct iovec ioVec; + ioVec.iov_base = GetFPRBuffer(); + ioVec.iov_len = GetFPRSize(); - ioVec.iov_base = GetGPRBuffer(); - ioVec.iov_len = GetGPRSize(); + return WriteRegisterSet(&ioVec, GetFPRSize(), NT_FPREGSET); + } - error = ReadRegisterSet(&ioVec, GetGPRSize(), NT_PRSTATUS); + Status NativeRegisterContextLinux_arm64::ReadSVEHeader() { + Status error; - if (error.Success()) - m_gpr_is_valid = true; + struct iovec ioVec; - return error; -} + ioVec.iov_base = GetSVEHeader(); + ioVec.iov_len = GetSVEHeaderSize(); -Status NativeRegisterContextLinux_arm64::WriteGPR() { - struct iovec ioVec; + error = ReadRegisterSet(&ioVec, GetSVEHeaderSize(), NT_ARM_SVE); - m_gpr_is_valid = false; + m_sve_header_is_valid = true; - ioVec.iov_base = GetGPRBuffer(); - ioVec.iov_len = GetGPRSize(); + return error; + } - return WriteRegisterSet(&ioVec, GetGPRSize(), NT_PRSTATUS); -} + Status NativeRegisterContextLinux_arm64::WriteSVEHeader() { + Status error; -Status NativeRegisterContextLinux_arm64::ReadFPR() { - Status error; + struct iovec ioVec; - struct iovec ioVec; + ioVec.iov_base = GetSVEHeader(); + ioVec.iov_len = GetSVEHeaderSize(); - ioVec.iov_base = GetFPRBuffer(); - ioVec.iov_len = GetFPRSize(); + m_sve_buffer_is_valid = false; + m_sve_header_is_valid = false; - error = ReadRegisterSet(&ioVec, GetFPRSize(), NT_FPREGSET); + return WriteRegisterSet(&ioVec, GetSVEHeaderSize(), NT_ARM_SVE); + } - if (error.Success()) - m_fpu_is_valid = true; + Status NativeRegisterContextLinux_arm64::ReadAllSVE() { + Status error; - return error; -} + struct iovec ioVec; -Status NativeRegisterContextLinux_arm64::WriteFPR() { - struct iovec ioVec; + ioVec.iov_base = GetSVEBuffer(); + ioVec.iov_len = GetSVEBufferSize(); - m_fpu_is_valid = false; + error = ReadRegisterSet(&ioVec, GetSVEBufferSize(), NT_ARM_SVE); - ioVec.iov_base = GetFPRBuffer(); - ioVec.iov_len = GetFPRSize(); + if (error.Success()) + m_sve_buffer_is_valid = true; - return WriteRegisterSet(&ioVec, GetFPRSize(), NT_FPREGSET); -} + return error; + } -void NativeRegisterContextLinux_arm64::InvalidateAllRegisters() { - m_gpr_is_valid = false; - m_fpu_is_valid = false; -} + Status NativeRegisterContextLinux_arm64::WriteAllSVE() { + Status error; -uint32_t NativeRegisterContextLinux_arm64::CalculateFprOffset( - const RegisterInfo *reg_info) const { - return reg_info->byte_offset - - GetRegisterInfoAtIndex(m_reg_info.first_fpr)->byte_offset; -} + struct iovec ioVec; + + ioVec.iov_base = GetSVEBuffer(); + ioVec.iov_len = GetSVEBufferSize(); + + m_sve_buffer_is_valid = false; + m_sve_header_is_valid = false; + + return WriteRegisterSet(&ioVec, GetSVEBufferSize(), NT_ARM_SVE); + } + + void NativeRegisterContextLinux_arm64::InvalidateAllRegisters() { + m_gpr_is_valid = false; + m_fpu_is_valid = false; + m_sve_buffer_is_valid = false; + m_sve_header_is_valid = false; + } + + void NativeRegisterContextLinux_arm64::ConfigureRegisterContext() { + if 
(!m_sve_header_is_valid) { + Status error = ReadSVEHeader(); + + if (error.Success()) { + if ((m_sve_header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) + m_sve_state = SVE_STATE::SVE_STATE_FPSIMD; + else if ((m_sve_header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE) + m_sve_state = SVE_STATE::SVE_STATE_FULL; + + if (sve_vl_valid(m_sve_header.vl)) { + size_t vq = sve_vq_from_vl(m_sve_header.vl); + SetRegisterInfoMode(vq); + m_sve_ptrace_payload.resize(SVE_PT_SIZE(vq, SVE_PT_REGS_SVE)); + } else { + m_sve_state = SVE_STATE::SVE_STATE_DISABLED; + SetRegisterInfoMode(eRegisterInfoModeAArch64); + } + } else { + m_sve_state = SVE_STATE::SVE_STATE_DISABLED; + SetRegisterInfoMode(eRegisterInfoModeAArch64); + } + } + } + + uint32_t NativeRegisterContextLinux_arm64::CalculateFprOffset( + const RegisterInfo *reg_info) const { + return reg_info->byte_offset - + GetRegisterInfoAtIndex(m_reg_info.first_fpr)->byte_offset; + } + + uint32_t + NativeRegisterContextLinux_arm64::CalculateSVEOffset(uint32_t reg_num) const { + if (m_sve_state == SVE_STATE::SVE_STATE_FPSIMD) { + if (IsSVEZReg(reg_num)) + return (reg_num - sve_z0_arm64) * 16; + else if (reg_num == fpu_fpsr_arm64) + return 32 * 16; + else if (reg_num == fpu_fpcr_arm64) + return (32 * 16) + 4; + } else if (m_sve_state == SVE_STATE::SVE_STATE_FULL) { + size_t vq = sve_vq_from_vl(m_sve_header.vl); + if (IsSVEZReg(reg_num)) + return SVE_PT_SVE_ZREG_OFFSET(vq, reg_num - sve_z0_arm64); + else if (IsSVEPReg(reg_num)) + return SVE_PT_SVE_PREG_OFFSET(vq, reg_num - sve_p0_arm64); + else if (reg_num == sve_ffr_arm64) + return SVE_PT_SVE_FFR_OFFSET(vq); + else if (reg_num == fpu_fpsr_arm64) + return SVE_PT_SVE_FPSR_OFFSET(vq); + else if (reg_num == fpu_fpcr_arm64) + return SVE_PT_SVE_FPCR_OFFSET(vq); + } + return 0; + } + + void *NativeRegisterContextLinux_arm64::GetSVEBuffer() { + if (m_sve_state == SVE_STATE::SVE_STATE_FPSIMD) + return m_sve_ptrace_payload.data() + SVE_PT_FPSIMD_OFFSET; + return m_sve_ptrace_payload.data(); + } #endif // defined (__arm64__) || defined (__aarch64__) Index: lldb/source/Plugins/Process/Linux/NativeThreadLinux.h =================================================================== --- lldb/source/Plugins/Process/Linux/NativeThreadLinux.h +++ lldb/source/Plugins/Process/Linux/NativeThreadLinux.h @@ -39,6 +39,9 @@ std::string &description) override; NativeRegisterContextLinux &GetRegisterContext() override { + if (m_reg_context_up && IsStopped(nullptr)) + m_reg_context_up->ConfigureRegisterContext(); + return *m_reg_context_up; } Index: lldb/source/Plugins/Process/Utility/NativeRegisterContextRegisterInfo.h =================================================================== --- lldb/source/Plugins/Process/Utility/NativeRegisterContextRegisterInfo.h +++ lldb/source/Plugins/Process/Utility/NativeRegisterContextRegisterInfo.h @@ -35,6 +35,10 @@ const RegisterInfoInterface &GetRegisterInfoInterface() const; + uint32_t SetRegisterInfoMode(uint32_t mode) { + return m_register_info_interface_up->SetRegisterInfoMode(mode); + } + private: std::unique_ptr m_register_info_interface_up; }; Index: lldb/source/Plugins/Process/Utility/RegisterInfoInterface.h =================================================================== --- lldb/source/Plugins/Process/Utility/RegisterInfoInterface.h +++ lldb/source/Plugins/Process/Utility/RegisterInfoInterface.h @@ -61,6 +61,12 @@ return nullptr; } + virtual uint32_t SetRegisterInfoMode(uint32_t mode, uint32_t offset = 0) { + return 0; + } + + virtual uint32_t GetRegisterInfoMode() const { return 0; } 
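+  // (Reviewer note, not part of the original patch: on AArch64 the "mode"
+  // argument encodes the SVE vector length in 16-byte quadwords (vq), with
+  // eRegisterInfoModeAArch64 selecting the static non-SVE register infos;
+  // see RegisterInfoPOSIX_arm64::SetRegisterInfoMode later in this patch.)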
+
 public:
   // FIXME make private.
   lldb_private::ArchSpec m_target_arch;
Index: lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.h
===================================================================
--- lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.h
+++ lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.h
@@ -12,6 +12,14 @@
 #include "RegisterInfoInterface.h"
 #include "lldb/Target/RegisterContext.h"
 #include "lldb/lldb-private.h"
+#include <map>
+
+// AArch64 Register set FP/SIMD feature configuration
+enum {
+  eRegisterInfoModeAArch64,
+  eRegisterInfoModeAArch64SVE,
+  eRegisterInfoModeAArch64SVEMax = 256
+};
 
 class RegisterInfoPOSIX_arm64 : public lldb_private::RegisterInfoInterface {
 public:
@@ -61,7 +69,26 @@
   uint32_t GetRegisterCount() const override;
 
+  uint32_t SetRegisterInfoMode(uint32_t mode, uint32_t offset = 0) override;
+
+  uint32_t GetRegisterInfoMode() const override;
+
+  bool RegisterInfoModeIsValid(uint32_t mode) {
+    if (mode >= eRegisterInfoModeAArch64 &&
+        mode <= eRegisterInfoModeAArch64SVEMax)
+      return true;
+    return false;
+  }
+
 private:
+  typedef std::vector<lldb_private::RegisterInfo> dynamic_register_infos;
+  typedef std::map<uint32_t, dynamic_register_infos> per_mode_register_infos;
+
+  dynamic_register_infos m_dynamic_register_infos = {};
+  per_mode_register_infos m_per_vl_reg_infos;
+
+  uint32_t m_reg_info_mode = eRegisterInfoModeAArch64;
+
   const lldb_private::RegisterInfo *m_register_info_p;
   uint32_t m_register_info_count;
 };
Index: lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.cpp
===================================================================
--- lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.cpp
+++ lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.cpp
@@ -25,10 +25,29 @@
   (LLVM_EXTENSION offsetof(RegisterInfoPOSIX_arm64::FPU, reg) +                \
    sizeof(RegisterInfoPOSIX_arm64::GPR))
 
+// This information is based on the AArch64 SVE architecture reference manual.
+// AArch64 with SVE has 32 Z and 16 P vector registers. There is also an FFR
+// (First Fault) register and a VG (Vector Granule) pseudo register.
+
+// The SVE 16-byte quadword is the basic unit of expansion in vector length.
+#define SVE_QUAD_WORD_BYTES 16
+
+// Vector length is the multiplier which determines the number of quadwords
+// (multiples of 128 bits, i.e. 16 bytes) present in a Z register. Vector
+// length is decided during execution and can change at runtime. SVE AArch64
+// register infos have one mode for each valid vector length value. A change
+// in vector length requires the register context to update the sizes of the
+// SVE Z, P and FFR registers, as well as the byte offsets of all registers
+// affected by the change.
+#define SVE_REGS_DEFAULT_OFFSET_LINUX sizeof(RegisterInfoPOSIX_arm64::GPR)
+
+#define SVE_OFFSET_VG SVE_REGS_DEFAULT_OFFSET_LINUX
+
 #define EXC_OFFSET_NAME(reg)                                                   \
   (LLVM_EXTENSION offsetof(RegisterInfoPOSIX_arm64::EXC, reg) +                \
    sizeof(RegisterInfoPOSIX_arm64::GPR) +                                      \
    sizeof(RegisterInfoPOSIX_arm64::FPU))
+
 #define DBG_OFFSET_NAME(reg)                                                   \
   (LLVM_EXTENSION offsetof(RegisterInfoPOSIX_arm64::DBG, reg) +                \
    sizeof(RegisterInfoPOSIX_arm64::GPR) +                                      \
@@ -50,8 +69,11 @@
 // Include RegisterInfos_arm64 to declare our g_register_infos_arm64 structure.
#define DECLARE_REGISTER_INFOS_ARM64_STRUCT +#define DECLARE_REGISTER_INFOS_ARM64_SVE_STRUCT #include "RegisterInfos_arm64.h" +#include "RegisterInfos_arm64_sve.h" #undef DECLARE_REGISTER_INFOS_ARM64_STRUCT +#undef DECLARE_REGISTER_INFOS_ARM64_SVE_STRUCT static const lldb_private::RegisterInfo * GetRegisterInfoPtr(const lldb_private::ArchSpec &target_arch) { @@ -96,3 +118,87 @@ uint32_t RegisterInfoPOSIX_arm64::GetRegisterCount() const { return m_register_info_count; } + +uint32_t RegisterInfoPOSIX_arm64::SetRegisterInfoMode(uint32_t mode, + uint32_t offset) { + // Register info mode denotes SVE vector length in context of AArch64. + // Register info mode once set to zero permanently selects default static + // AArch64 register info and cannot be changed to SVE. Also if an invalid + // or previously set vector length is passed to this function then it will + // exit immediately with previously set vector length. + if (!RegisterInfoModeIsValid(mode) || m_reg_info_mode == mode) + return m_reg_info_mode; + + if (mode == eRegisterInfoModeAArch64 && + m_reg_info_mode > eRegisterInfoModeAArch64) + mode = eRegisterInfoModeAArch64SVE; + + m_reg_info_mode = mode; + + if (mode == eRegisterInfoModeAArch64) { + m_register_info_count = + static_cast(sizeof(g_register_infos_arm64_le) / + sizeof(g_register_infos_arm64_le[0])); + m_register_info_p = g_register_infos_arm64_le; + + return m_reg_info_mode; + } + + m_dynamic_register_infos.clear(); + + m_register_info_count = + static_cast(sizeof(g_register_infos_arm64_sve_le) / + sizeof(g_register_infos_arm64_sve_le[0])); + + if (m_per_vl_reg_infos.count(mode)) { + m_dynamic_register_infos = m_per_vl_reg_infos.at(mode); + m_register_info_p = &m_dynamic_register_infos[0]; + return m_reg_info_mode; + } + + m_dynamic_register_infos = std::vector( + g_register_infos_arm64_sve_le, + g_register_infos_arm64_sve_le + m_register_info_count); + m_register_info_p = &m_dynamic_register_infos[0]; + + if (!offset) + offset = SVE_REGS_DEFAULT_OFFSET_LINUX; + + m_dynamic_register_infos[sve_vg].byte_offset = offset; + offset += m_dynamic_register_infos[sve_vg].byte_size; + + // Update Z registers size and offset + uint32_t s_reg_base = fpu_s0; + uint32_t d_reg_base = fpu_d0; + uint32_t v_reg_base = fpu_v0; + uint32_t z_reg_base = sve_z0; + + for (uint32_t index = 0; index < 32; index++) { + m_dynamic_register_infos[s_reg_base + index].byte_offset = offset; + m_dynamic_register_infos[d_reg_base + index].byte_offset = offset; + m_dynamic_register_infos[v_reg_base + index].byte_offset = offset; + m_dynamic_register_infos[z_reg_base + index].byte_offset = offset; + + m_dynamic_register_infos[z_reg_base + index].byte_size = + mode * SVE_QUAD_WORD_BYTES; + offset += m_dynamic_register_infos[z_reg_base + index].byte_size; + } + + // Update P registers and FFR size and offset + for (uint32_t it = sve_p0; it <= sve_ffr; it++) { + m_dynamic_register_infos[it].byte_offset = offset; + m_dynamic_register_infos[it].byte_size = mode * SVE_QUAD_WORD_BYTES / 8; + offset += m_dynamic_register_infos[it].byte_size; + } + + m_dynamic_register_infos[fpu_fpsr].byte_offset = offset; + m_dynamic_register_infos[fpu_fpcr].byte_offset = offset + 4; + + m_per_vl_reg_infos.insert(std::make_pair(mode, m_dynamic_register_infos)); + + return m_reg_info_mode; +} + +uint32_t RegisterInfoPOSIX_arm64::GetRegisterInfoMode() const { + return m_reg_info_mode; +} Index: lldb/source/Plugins/Process/Utility/RegisterInfos_arm64.h =================================================================== --- 
lldb/source/Plugins/Process/Utility/RegisterInfos_arm64.h +++ lldb/source/Plugins/Process/Utility/RegisterInfos_arm64.h @@ -294,6 +294,65 @@ dbg_wcr14, dbg_wcr15, +#ifdef DECLARE_REGISTER_INFOS_ARM64_SVE_STRUCT + sve_fpsr = fpu_fpsr, + sve_fpcr = fpu_fpcr, + + sve_vg = dbg_wcr15 + 1, + + sve_z0, + sve_z1, + sve_z2, + sve_z3, + sve_z4, + sve_z5, + sve_z6, + sve_z7, + sve_z8, + sve_z9, + sve_z10, + sve_z11, + sve_z12, + sve_z13, + sve_z14, + sve_z15, + sve_z16, + sve_z17, + sve_z18, + sve_z19, + sve_z20, + sve_z21, + sve_z22, + sve_z23, + sve_z24, + sve_z25, + sve_z26, + sve_z27, + sve_z28, + sve_z29, + sve_z30, + sve_z31, + + sve_p0, + sve_p1, + sve_p2, + sve_p3, + sve_p4, + sve_p5, + sve_p6, + sve_p7, + sve_p8, + sve_p9, + sve_p10, + sve_p11, + sve_p12, + sve_p13, + sve_p14, + sve_p15, + + sve_ffr, + +#endif k_num_registers }; Index: lldb/source/Plugins/Process/Utility/RegisterInfos_arm64_sve.h =================================================================== --- /dev/null +++ lldb/source/Plugins/Process/Utility/RegisterInfos_arm64_sve.h @@ -0,0 +1,649 @@ +//===-- RegisterInfos_arm64_sve.h -------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifdef DECLARE_REGISTER_INFOS_ARM64_SVE_STRUCT + +#ifndef SVE_OFFSET_VG +#error SVE_OFFSET_VG must be defined before including this header file +#endif + +static uint32_t g_sve_s0_invalidates[] = {sve_z0, fpu_v0, fpu_d0, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s1_invalidates[] = {sve_z1, fpu_v1, fpu_d1, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s2_invalidates[] = {sve_z2, fpu_v2, fpu_d2, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s3_invalidates[] = {sve_z3, fpu_v3, fpu_d3, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s4_invalidates[] = {sve_z4, fpu_v4, fpu_d4, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s5_invalidates[] = {sve_z5, fpu_v5, fpu_d5, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s6_invalidates[] = {sve_z6, fpu_v6, fpu_d6, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s7_invalidates[] = {sve_z7, fpu_v7, fpu_d7, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s8_invalidates[] = {sve_z8, fpu_v8, fpu_d8, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s9_invalidates[] = {sve_z9, fpu_v9, fpu_d9, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s10_invalidates[] = {sve_z10, fpu_v10, fpu_d10, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s11_invalidates[] = {sve_z11, fpu_v11, fpu_d11, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s12_invalidates[] = {sve_z12, fpu_v12, fpu_d12, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s13_invalidates[] = {sve_z13, fpu_v13, fpu_d13, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s14_invalidates[] = {sve_z14, fpu_v14, fpu_d14, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s15_invalidates[] = {sve_z15, fpu_v15, fpu_d15, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s16_invalidates[] = {sve_z16, fpu_v16, fpu_d16, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s17_invalidates[] = {sve_z17, fpu_v17, fpu_d17, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s18_invalidates[] = {sve_z18, fpu_v18, fpu_d18, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s19_invalidates[] = {sve_z19, fpu_v19, fpu_d19, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s20_invalidates[] = {sve_z20, fpu_v20, fpu_d20, + 
LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s21_invalidates[] = {sve_z21, fpu_v21, fpu_d21, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s22_invalidates[] = {sve_z22, fpu_v22, fpu_d22, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s23_invalidates[] = {sve_z23, fpu_v23, fpu_d23, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s24_invalidates[] = {sve_z24, fpu_v24, fpu_d24, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s25_invalidates[] = {sve_z25, fpu_v25, fpu_d25, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s26_invalidates[] = {sve_z26, fpu_v26, fpu_d26, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s27_invalidates[] = {sve_z27, fpu_v27, fpu_d27, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s28_invalidates[] = {sve_z28, fpu_v28, fpu_d28, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s29_invalidates[] = {sve_z29, fpu_v29, fpu_d29, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s30_invalidates[] = {sve_z30, fpu_v30, fpu_d30, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_s31_invalidates[] = {sve_z31, fpu_v31, fpu_d31, + LLDB_INVALID_REGNUM}; + +static uint32_t g_sve_d0_invalidates[] = {sve_z0, fpu_v0, fpu_s0, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d1_invalidates[] = {sve_z1, fpu_v1, fpu_s1, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d2_invalidates[] = {sve_z2, fpu_v2, fpu_s2, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d3_invalidates[] = {sve_z3, fpu_v3, fpu_s3, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d4_invalidates[] = {sve_z4, fpu_v4, fpu_s4, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d5_invalidates[] = {sve_z5, fpu_v5, fpu_s5, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d6_invalidates[] = {sve_z6, fpu_v6, fpu_s6, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d7_invalidates[] = {sve_z7, fpu_v7, fpu_s7, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d8_invalidates[] = {sve_z8, fpu_v8, fpu_s8, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d9_invalidates[] = {sve_z9, fpu_v9, fpu_s9, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d10_invalidates[] = {sve_z10, fpu_v10, fpu_s10, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d11_invalidates[] = {sve_z11, fpu_v11, fpu_s11, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d12_invalidates[] = {sve_z12, fpu_v12, fpu_s12, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d13_invalidates[] = {sve_z13, fpu_v13, fpu_s13, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d14_invalidates[] = {sve_z14, fpu_v14, fpu_s14, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d15_invalidates[] = {sve_z15, fpu_v15, fpu_s15, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d16_invalidates[] = {sve_z16, fpu_v16, fpu_s16, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d17_invalidates[] = {sve_z17, fpu_v17, fpu_s17, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d18_invalidates[] = {sve_z18, fpu_v18, fpu_s18, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d19_invalidates[] = {sve_z19, fpu_v19, fpu_s19, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d20_invalidates[] = {sve_z20, fpu_v20, fpu_s20, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d21_invalidates[] = {sve_z21, fpu_v21, fpu_s21, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d22_invalidates[] = {sve_z22, fpu_v22, fpu_s22, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d23_invalidates[] = {sve_z23, fpu_v23, fpu_s23, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d24_invalidates[] = {sve_z24, fpu_v24, fpu_s24, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d25_invalidates[] = {sve_z25, fpu_v25, fpu_s25, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d26_invalidates[] = {sve_z26, 
fpu_v26, fpu_s26, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d27_invalidates[] = {sve_z27, fpu_v27, fpu_s27, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d28_invalidates[] = {sve_z28, fpu_v28, fpu_s28, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d29_invalidates[] = {sve_z29, fpu_v29, fpu_s29, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d30_invalidates[] = {sve_z30, fpu_v30, fpu_s30, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_d31_invalidates[] = {sve_z31, fpu_v31, fpu_s31, + LLDB_INVALID_REGNUM}; + +static uint32_t g_sve_v0_invalidates[] = {sve_z0, fpu_d0, fpu_s0, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v1_invalidates[] = {sve_z1, fpu_d1, fpu_s1, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v2_invalidates[] = {sve_z2, fpu_d2, fpu_s2, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v3_invalidates[] = {sve_z3, fpu_d3, fpu_s3, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v4_invalidates[] = {sve_z4, fpu_d4, fpu_s4, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v5_invalidates[] = {sve_z5, fpu_d5, fpu_s5, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v6_invalidates[] = {sve_z6, fpu_d6, fpu_s6, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v7_invalidates[] = {sve_z7, fpu_d7, fpu_s7, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v8_invalidates[] = {sve_z8, fpu_d8, fpu_s8, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v9_invalidates[] = {sve_z9, fpu_d9, fpu_s9, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v10_invalidates[] = {sve_z10, fpu_d10, fpu_s10, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v11_invalidates[] = {sve_z11, fpu_d11, fpu_s11, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v12_invalidates[] = {sve_z12, fpu_d12, fpu_s12, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v13_invalidates[] = {sve_z13, fpu_d13, fpu_s13, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v14_invalidates[] = {sve_z14, fpu_d14, fpu_s14, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v15_invalidates[] = {sve_z15, fpu_d15, fpu_s15, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v16_invalidates[] = {sve_z16, fpu_d16, fpu_s16, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v17_invalidates[] = {sve_z17, fpu_d17, fpu_s17, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v18_invalidates[] = {sve_z18, fpu_d18, fpu_s18, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v19_invalidates[] = {sve_z19, fpu_d19, fpu_s19, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v20_invalidates[] = {sve_z20, fpu_d20, fpu_s20, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v21_invalidates[] = {sve_z21, fpu_d21, fpu_s21, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v22_invalidates[] = {sve_z22, fpu_d22, fpu_s22, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v23_invalidates[] = {sve_z23, fpu_d23, fpu_s23, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v24_invalidates[] = {sve_z24, fpu_d24, fpu_s24, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v25_invalidates[] = {sve_z25, fpu_d25, fpu_s25, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v26_invalidates[] = {sve_z26, fpu_d26, fpu_s26, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v27_invalidates[] = {sve_z27, fpu_d27, fpu_s27, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v28_invalidates[] = {sve_z28, fpu_d28, fpu_s28, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v29_invalidates[] = {sve_z29, fpu_d29, fpu_s29, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v30_invalidates[] = {sve_z30, fpu_d30, fpu_s30, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_v31_invalidates[] = {sve_z31, fpu_d31, fpu_s31, + LLDB_INVALID_REGNUM}; + +static uint32_t 
g_sve_z0_invalidates[] = {fpu_v0, fpu_d0, fpu_s0, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z1_invalidates[] = {fpu_v1, fpu_d1, fpu_s1, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z2_invalidates[] = {fpu_v2, fpu_d2, fpu_s2, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z3_invalidates[] = {fpu_v3, fpu_d3, fpu_s3, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z4_invalidates[] = {fpu_v4, fpu_d4, fpu_s4, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z5_invalidates[] = {fpu_v5, fpu_d5, fpu_s5, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z6_invalidates[] = {fpu_v6, fpu_d6, fpu_s6, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z7_invalidates[] = {fpu_v7, fpu_d7, fpu_s7, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z8_invalidates[] = {fpu_v8, fpu_d8, fpu_s8, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z9_invalidates[] = {fpu_v9, fpu_d9, fpu_s9, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z10_invalidates[] = {fpu_v10, fpu_d10, fpu_s10, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z11_invalidates[] = {fpu_v11, fpu_d11, fpu_s11, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z12_invalidates[] = {fpu_v12, fpu_d12, fpu_s12, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z13_invalidates[] = {fpu_v13, fpu_d13, fpu_s13, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z14_invalidates[] = {fpu_v14, fpu_d14, fpu_s14, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z15_invalidates[] = {fpu_v15, fpu_d15, fpu_s15, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z16_invalidates[] = {fpu_v16, fpu_d16, fpu_s16, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z17_invalidates[] = {fpu_v17, fpu_d17, fpu_s17, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z18_invalidates[] = {fpu_v18, fpu_d18, fpu_s18, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z19_invalidates[] = {fpu_v19, fpu_d19, fpu_s19, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z20_invalidates[] = {fpu_v20, fpu_d20, fpu_s20, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z21_invalidates[] = {fpu_v21, fpu_d21, fpu_s21, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z22_invalidates[] = {fpu_v22, fpu_d22, fpu_s22, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z23_invalidates[] = {fpu_v23, fpu_d23, fpu_s23, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z24_invalidates[] = {fpu_v24, fpu_d24, fpu_s24, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z25_invalidates[] = {fpu_v25, fpu_d25, fpu_s25, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z26_invalidates[] = {fpu_v26, fpu_d26, fpu_s26, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z27_invalidates[] = {fpu_v27, fpu_d27, fpu_s27, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z28_invalidates[] = {fpu_v28, fpu_d28, fpu_s28, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z29_invalidates[] = {fpu_v29, fpu_d29, fpu_s29, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z30_invalidates[] = {fpu_v30, fpu_d30, fpu_s30, + LLDB_INVALID_REGNUM}; +static uint32_t g_sve_z31_invalidates[] = {fpu_v31, fpu_d31, fpu_s31, + LLDB_INVALID_REGNUM}; + +static uint32_t g_contained_z0[] = {sve_z0, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z1[] = {sve_z1, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z2[] = {sve_z2, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z3[] = {sve_z3, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z4[] = {sve_z4, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z5[] = {sve_z5, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z6[] = {sve_z6, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z7[] = {sve_z7, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z8[] 
= {sve_z8, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z9[] = {sve_z9, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z10[] = {sve_z10, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z11[] = {sve_z11, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z12[] = {sve_z12, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z13[] = {sve_z13, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z14[] = {sve_z14, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z15[] = {sve_z15, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z16[] = {sve_z16, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z17[] = {sve_z17, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z18[] = {sve_z18, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z19[] = {sve_z19, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z20[] = {sve_z20, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z21[] = {sve_z21, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z22[] = {sve_z22, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z23[] = {sve_z23, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z24[] = {sve_z24, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z25[] = {sve_z25, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z26[] = {sve_z26, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z27[] = {sve_z27, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z28[] = {sve_z28, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z29[] = {sve_z29, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z30[] = {sve_z30, LLDB_INVALID_REGNUM}; +static uint32_t g_contained_z31[] = {sve_z31, LLDB_INVALID_REGNUM}; + +#define VG_OFFSET_NAME(reg) SVE_OFFSET_VG + +#define SVE_REG_KIND(reg) MISC_KIND_GENERIC(reg, sve, LLDB_INVALID_REGNUM) +#define MISC_VG_KIND(lldb_kind) MISC_KIND_GENERIC(vg, sve, LLDB_INVALID_REGNUM) + +// The default offset of SVE Z registers and all corresponding pseudo registers +// (S, D and V registers) is zero and will be configured during execution.
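+// For reference (an editorial note, not part of the patch's original text):
+// once the vector length vl is known, vq = vl / 16 quadwords, each Z register
+// occupies vq x 16 bytes, and each P register as well as FFR occupies
+// vq x 2 bytes. Equivalently, in terms of the vg pseudo register (8-byte
+// granules), a Z register is vg x 8 bytes and a P register is vg bytes. A
+// helper outside this header is assumed to patch the byte_size/byte_offset
+// fields of the entries below accordingly at run time.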
+ +// Defines sve pseudo vector (V) register with 16-byte size +#define DEFINE_VREG_SVE(vreg, zreg) \ + { \ + #vreg, nullptr, 16, 0, lldb::eEncodingVector, lldb::eFormatVectorOfUInt8, \ + VREG_KIND(vreg), g_contained_##zreg, g_sve_##vreg##_invalidates, \ + nullptr, 0 \ + } + +// Defines S and D pseudo registers mapping over corresponding vector register +#define DEFINE_FPU_PSEUDO_SVE(reg, size, zreg) \ + { \ + #reg, nullptr, size, 0, lldb::eEncodingIEEE754, lldb::eFormatFloat, \ + MISC_KIND(fpu_##reg), g_contained_##zreg, g_sve_##reg##_invalidates, \ + nullptr, 0 \ + } + +// Defines a Z vector register with 16-byte default size +#define DEFINE_ZREG(reg) \ + { \ + #reg, nullptr, 16, 0, lldb::eEncodingVector, lldb::eFormatVectorOfUInt8, \ + SVE_REG_KIND(reg), nullptr, g_sve_##reg##_invalidates, nullptr, 0 \ + } + +// Defines a P vector register with 2-byte default size +#define DEFINE_PREG(reg) \ + { \ + #reg, nullptr, 2, 0, lldb::eEncodingVector, lldb::eFormatVectorOfUInt8, \ + SVE_REG_KIND(reg), nullptr, nullptr, nullptr, 0 \ + } + +static lldb_private::RegisterInfo g_register_infos_arm64_sve_le[] = { + // DEFINE_GPR64(name, GENERIC KIND) + DEFINE_GPR64(x0, nullptr, LLDB_REGNUM_GENERIC_ARG1, g_x0_invalidates), + DEFINE_GPR64(x1, nullptr, LLDB_REGNUM_GENERIC_ARG2, g_x1_invalidates), + DEFINE_GPR64(x2, nullptr, LLDB_REGNUM_GENERIC_ARG3, g_x2_invalidates), + DEFINE_GPR64(x3, nullptr, LLDB_REGNUM_GENERIC_ARG4, g_x3_invalidates), + DEFINE_GPR64(x4, nullptr, LLDB_REGNUM_GENERIC_ARG5, g_x4_invalidates), + DEFINE_GPR64(x5, nullptr, LLDB_REGNUM_GENERIC_ARG6, g_x5_invalidates), + DEFINE_GPR64(x6, nullptr, LLDB_REGNUM_GENERIC_ARG7, g_x6_invalidates), + DEFINE_GPR64(x7, nullptr, LLDB_REGNUM_GENERIC_ARG8, g_x7_invalidates), + DEFINE_GPR64(x8, nullptr, LLDB_INVALID_REGNUM, g_x8_invalidates), + DEFINE_GPR64(x9, nullptr, LLDB_INVALID_REGNUM, g_x9_invalidates), + DEFINE_GPR64(x10, nullptr, LLDB_INVALID_REGNUM, g_x10_invalidates), + DEFINE_GPR64(x11, nullptr, LLDB_INVALID_REGNUM, g_x11_invalidates), + DEFINE_GPR64(x12, nullptr, LLDB_INVALID_REGNUM, g_x12_invalidates), + DEFINE_GPR64(x13, nullptr, LLDB_INVALID_REGNUM, g_x13_invalidates), + DEFINE_GPR64(x14, nullptr, LLDB_INVALID_REGNUM, g_x14_invalidates), + DEFINE_GPR64(x15, nullptr, LLDB_INVALID_REGNUM, g_x15_invalidates), + DEFINE_GPR64(x16, nullptr, LLDB_INVALID_REGNUM, g_x16_invalidates), + DEFINE_GPR64(x17, nullptr, LLDB_INVALID_REGNUM, g_x17_invalidates), + DEFINE_GPR64(x18, nullptr, LLDB_INVALID_REGNUM, g_x18_invalidates), + DEFINE_GPR64(x19, nullptr, LLDB_INVALID_REGNUM, g_x19_invalidates), + DEFINE_GPR64(x20, nullptr, LLDB_INVALID_REGNUM, g_x20_invalidates), + DEFINE_GPR64(x21, nullptr, LLDB_INVALID_REGNUM, g_x21_invalidates), + DEFINE_GPR64(x22, nullptr, LLDB_INVALID_REGNUM, g_x22_invalidates), + DEFINE_GPR64(x23, nullptr, LLDB_INVALID_REGNUM, g_x23_invalidates), + DEFINE_GPR64(x24, nullptr, LLDB_INVALID_REGNUM, g_x24_invalidates), + DEFINE_GPR64(x25, nullptr, LLDB_INVALID_REGNUM, g_x25_invalidates), + DEFINE_GPR64(x26, nullptr, LLDB_INVALID_REGNUM, g_x26_invalidates), + DEFINE_GPR64(x27, nullptr, LLDB_INVALID_REGNUM, g_x27_invalidates), + DEFINE_GPR64(x28, nullptr, LLDB_INVALID_REGNUM, g_x28_invalidates), + // DEFINE_GPR64(name, GENERIC KIND) + DEFINE_GPR64(fp, STRINGIZE(x29), LLDB_REGNUM_GENERIC_FP, nullptr), + DEFINE_GPR64(lr, STRINGIZE(x30), LLDB_REGNUM_GENERIC_RA, nullptr), + DEFINE_GPR64(sp, STRINGIZE(x31), LLDB_REGNUM_GENERIC_SP, nullptr), + DEFINE_GPR64(pc, nullptr, LLDB_REGNUM_GENERIC_PC, nullptr), + + // DEFINE_MISC_REGS(name, size, 
TYPE, lldb kind) + DEFINE_MISC_REGS(cpsr, 4, CPSR, gpr_cpsr), + + // DEFINE_GPR32(name, parent name) + DEFINE_GPR32(w0, x0), + DEFINE_GPR32(w1, x1), + DEFINE_GPR32(w2, x2), + DEFINE_GPR32(w3, x3), + DEFINE_GPR32(w4, x4), + DEFINE_GPR32(w5, x5), + DEFINE_GPR32(w6, x6), + DEFINE_GPR32(w7, x7), + DEFINE_GPR32(w8, x8), + DEFINE_GPR32(w9, x9), + DEFINE_GPR32(w10, x10), + DEFINE_GPR32(w11, x11), + DEFINE_GPR32(w12, x12), + DEFINE_GPR32(w13, x13), + DEFINE_GPR32(w14, x14), + DEFINE_GPR32(w15, x15), + DEFINE_GPR32(w16, x16), + DEFINE_GPR32(w17, x17), + DEFINE_GPR32(w18, x18), + DEFINE_GPR32(w19, x19), + DEFINE_GPR32(w20, x20), + DEFINE_GPR32(w21, x21), + DEFINE_GPR32(w22, x22), + DEFINE_GPR32(w23, x23), + DEFINE_GPR32(w24, x24), + DEFINE_GPR32(w25, x25), + DEFINE_GPR32(w26, x26), + DEFINE_GPR32(w27, x27), + DEFINE_GPR32(w28, x28), + + // DEFINE_VREG_SVE(v register, z register) + DEFINE_VREG_SVE(v0, z0), + DEFINE_VREG_SVE(v1, z1), + DEFINE_VREG_SVE(v2, z2), + DEFINE_VREG_SVE(v3, z3), + DEFINE_VREG_SVE(v4, z4), + DEFINE_VREG_SVE(v5, z5), + DEFINE_VREG_SVE(v6, z6), + DEFINE_VREG_SVE(v7, z7), + DEFINE_VREG_SVE(v8, z8), + DEFINE_VREG_SVE(v9, z9), + DEFINE_VREG_SVE(v10, z10), + DEFINE_VREG_SVE(v11, z11), + DEFINE_VREG_SVE(v12, z12), + DEFINE_VREG_SVE(v13, z13), + DEFINE_VREG_SVE(v14, z14), + DEFINE_VREG_SVE(v15, z15), + DEFINE_VREG_SVE(v16, z16), + DEFINE_VREG_SVE(v17, z17), + DEFINE_VREG_SVE(v18, z18), + DEFINE_VREG_SVE(v19, z19), + DEFINE_VREG_SVE(v20, z20), + DEFINE_VREG_SVE(v21, z21), + DEFINE_VREG_SVE(v22, z22), + DEFINE_VREG_SVE(v23, z23), + DEFINE_VREG_SVE(v24, z24), + DEFINE_VREG_SVE(v25, z25), + DEFINE_VREG_SVE(v26, z26), + DEFINE_VREG_SVE(v27, z27), + DEFINE_VREG_SVE(v28, z28), + DEFINE_VREG_SVE(v29, z29), + DEFINE_VREG_SVE(v30, z30), + DEFINE_VREG_SVE(v31, z31), + + // DEFINE_FPU_PSEUDO(name, size, ENDIAN OFFSET, parent register) + DEFINE_FPU_PSEUDO_SVE(s0, 4, z0), + DEFINE_FPU_PSEUDO_SVE(s1, 4, z1), + DEFINE_FPU_PSEUDO_SVE(s2, 4, z2), + DEFINE_FPU_PSEUDO_SVE(s3, 4, z3), + DEFINE_FPU_PSEUDO_SVE(s4, 4, z4), + DEFINE_FPU_PSEUDO_SVE(s5, 4, z5), + DEFINE_FPU_PSEUDO_SVE(s6, 4, z6), + DEFINE_FPU_PSEUDO_SVE(s7, 4, z7), + DEFINE_FPU_PSEUDO_SVE(s8, 4, z8), + DEFINE_FPU_PSEUDO_SVE(s9, 4, z9), + DEFINE_FPU_PSEUDO_SVE(s10, 4, z10), + DEFINE_FPU_PSEUDO_SVE(s11, 4, z11), + DEFINE_FPU_PSEUDO_SVE(s12, 4, z12), + DEFINE_FPU_PSEUDO_SVE(s13, 4, z13), + DEFINE_FPU_PSEUDO_SVE(s14, 4, z14), + DEFINE_FPU_PSEUDO_SVE(s15, 4, z15), + DEFINE_FPU_PSEUDO_SVE(s16, 4, z16), + DEFINE_FPU_PSEUDO_SVE(s17, 4, z17), + DEFINE_FPU_PSEUDO_SVE(s18, 4, z18), + DEFINE_FPU_PSEUDO_SVE(s19, 4, z19), + DEFINE_FPU_PSEUDO_SVE(s20, 4, z20), + DEFINE_FPU_PSEUDO_SVE(s21, 4, z21), + DEFINE_FPU_PSEUDO_SVE(s22, 4, z22), + DEFINE_FPU_PSEUDO_SVE(s23, 4, z23), + DEFINE_FPU_PSEUDO_SVE(s24, 4, z24), + DEFINE_FPU_PSEUDO_SVE(s25, 4, z25), + DEFINE_FPU_PSEUDO_SVE(s26, 4, z26), + DEFINE_FPU_PSEUDO_SVE(s27, 4, z27), + DEFINE_FPU_PSEUDO_SVE(s28, 4, z28), + DEFINE_FPU_PSEUDO_SVE(s29, 4, z29), + DEFINE_FPU_PSEUDO_SVE(s30, 4, z30), + DEFINE_FPU_PSEUDO_SVE(s31, 4, z31), + + DEFINE_FPU_PSEUDO_SVE(d0, 8, z0), + DEFINE_FPU_PSEUDO_SVE(d1, 8, z1), + DEFINE_FPU_PSEUDO_SVE(d2, 8, z2), + DEFINE_FPU_PSEUDO_SVE(d3, 8, z3), + DEFINE_FPU_PSEUDO_SVE(d4, 8, z4), + DEFINE_FPU_PSEUDO_SVE(d5, 8, z5), + DEFINE_FPU_PSEUDO_SVE(d6, 8, z6), + DEFINE_FPU_PSEUDO_SVE(d7, 8, z7), + DEFINE_FPU_PSEUDO_SVE(d8, 8, z8), + DEFINE_FPU_PSEUDO_SVE(d9, 8, z9), + DEFINE_FPU_PSEUDO_SVE(d10, 8, z10), + DEFINE_FPU_PSEUDO_SVE(d11, 8, z11), + DEFINE_FPU_PSEUDO_SVE(d12, 8, z12), + 
DEFINE_FPU_PSEUDO_SVE(d13, 8, z13), + DEFINE_FPU_PSEUDO_SVE(d14, 8, z14), + DEFINE_FPU_PSEUDO_SVE(d15, 8, z15), + DEFINE_FPU_PSEUDO_SVE(d16, 8, z16), + DEFINE_FPU_PSEUDO_SVE(d17, 8, z17), + DEFINE_FPU_PSEUDO_SVE(d18, 8, z18), + DEFINE_FPU_PSEUDO_SVE(d19, 8, z19), + DEFINE_FPU_PSEUDO_SVE(d20, 8, z20), + DEFINE_FPU_PSEUDO_SVE(d21, 8, z21), + DEFINE_FPU_PSEUDO_SVE(d22, 8, z22), + DEFINE_FPU_PSEUDO_SVE(d23, 8, z23), + DEFINE_FPU_PSEUDO_SVE(d24, 8, z24), + DEFINE_FPU_PSEUDO_SVE(d25, 8, z25), + DEFINE_FPU_PSEUDO_SVE(d26, 8, z26), + DEFINE_FPU_PSEUDO_SVE(d27, 8, z27), + DEFINE_FPU_PSEUDO_SVE(d28, 8, z28), + DEFINE_FPU_PSEUDO_SVE(d29, 8, z29), + DEFINE_FPU_PSEUDO_SVE(d30, 8, z30), + DEFINE_FPU_PSEUDO_SVE(d31, 8, z31), + + // DEFINE_MISC_REGS(name, size, TYPE, lldb kind) + DEFINE_MISC_REGS(fpsr, 4, FPU, fpu_fpsr), + DEFINE_MISC_REGS(fpcr, 4, FPU, fpu_fpcr), + DEFINE_MISC_REGS(far, 8, EXC, exc_far), + DEFINE_MISC_REGS(esr, 4, EXC, exc_esr), + DEFINE_MISC_REGS(exception, 4, EXC, exc_exception), + + {DEFINE_DBG(bvr, 0)}, + {DEFINE_DBG(bvr, 1)}, + {DEFINE_DBG(bvr, 2)}, + {DEFINE_DBG(bvr, 3)}, + {DEFINE_DBG(bvr, 4)}, + {DEFINE_DBG(bvr, 5)}, + {DEFINE_DBG(bvr, 6)}, + {DEFINE_DBG(bvr, 7)}, + {DEFINE_DBG(bvr, 8)}, + {DEFINE_DBG(bvr, 9)}, + {DEFINE_DBG(bvr, 10)}, + {DEFINE_DBG(bvr, 11)}, + {DEFINE_DBG(bvr, 12)}, + {DEFINE_DBG(bvr, 13)}, + {DEFINE_DBG(bvr, 14)}, + {DEFINE_DBG(bvr, 15)}, + + {DEFINE_DBG(bcr, 0)}, + {DEFINE_DBG(bcr, 1)}, + {DEFINE_DBG(bcr, 2)}, + {DEFINE_DBG(bcr, 3)}, + {DEFINE_DBG(bcr, 4)}, + {DEFINE_DBG(bcr, 5)}, + {DEFINE_DBG(bcr, 6)}, + {DEFINE_DBG(bcr, 7)}, + {DEFINE_DBG(bcr, 8)}, + {DEFINE_DBG(bcr, 9)}, + {DEFINE_DBG(bcr, 10)}, + {DEFINE_DBG(bcr, 11)}, + {DEFINE_DBG(bcr, 12)}, + {DEFINE_DBG(bcr, 13)}, + {DEFINE_DBG(bcr, 14)}, + {DEFINE_DBG(bcr, 15)}, + + {DEFINE_DBG(wvr, 0)}, + {DEFINE_DBG(wvr, 1)}, + {DEFINE_DBG(wvr, 2)}, + {DEFINE_DBG(wvr, 3)}, + {DEFINE_DBG(wvr, 4)}, + {DEFINE_DBG(wvr, 5)}, + {DEFINE_DBG(wvr, 6)}, + {DEFINE_DBG(wvr, 7)}, + {DEFINE_DBG(wvr, 8)}, + {DEFINE_DBG(wvr, 9)}, + {DEFINE_DBG(wvr, 10)}, + {DEFINE_DBG(wvr, 11)}, + {DEFINE_DBG(wvr, 12)}, + {DEFINE_DBG(wvr, 13)}, + {DEFINE_DBG(wvr, 14)}, + {DEFINE_DBG(wvr, 15)}, + + {DEFINE_DBG(wcr, 0)}, + {DEFINE_DBG(wcr, 1)}, + {DEFINE_DBG(wcr, 2)}, + {DEFINE_DBG(wcr, 3)}, + {DEFINE_DBG(wcr, 4)}, + {DEFINE_DBG(wcr, 5)}, + {DEFINE_DBG(wcr, 6)}, + {DEFINE_DBG(wcr, 7)}, + {DEFINE_DBG(wcr, 8)}, + {DEFINE_DBG(wcr, 9)}, + {DEFINE_DBG(wcr, 10)}, + {DEFINE_DBG(wcr, 11)}, + {DEFINE_DBG(wcr, 12)}, + {DEFINE_DBG(wcr, 13)}, + {DEFINE_DBG(wcr, 14)}, + {DEFINE_DBG(wcr, 15)}, + + DEFINE_MISC_REGS(vg, 8, VG, sve_vg), + // DEFINE_ZREG(name) + DEFINE_ZREG(z0), + DEFINE_ZREG(z1), + DEFINE_ZREG(z2), + DEFINE_ZREG(z3), + DEFINE_ZREG(z4), + DEFINE_ZREG(z5), + DEFINE_ZREG(z6), + DEFINE_ZREG(z7), + DEFINE_ZREG(z8), + DEFINE_ZREG(z9), + DEFINE_ZREG(z10), + DEFINE_ZREG(z11), + DEFINE_ZREG(z12), + DEFINE_ZREG(z13), + DEFINE_ZREG(z14), + DEFINE_ZREG(z15), + DEFINE_ZREG(z16), + DEFINE_ZREG(z17), + DEFINE_ZREG(z18), + DEFINE_ZREG(z19), + DEFINE_ZREG(z20), + DEFINE_ZREG(z21), + DEFINE_ZREG(z22), + DEFINE_ZREG(z23), + DEFINE_ZREG(z24), + DEFINE_ZREG(z25), + DEFINE_ZREG(z26), + DEFINE_ZREG(z27), + DEFINE_ZREG(z28), + DEFINE_ZREG(z29), + DEFINE_ZREG(z30), + DEFINE_ZREG(z31), + + // DEFINE_PREG(name) + DEFINE_PREG(p0), + DEFINE_PREG(p1), + DEFINE_PREG(p2), + DEFINE_PREG(p3), + DEFINE_PREG(p4), + DEFINE_PREG(p5), + DEFINE_PREG(p6), + DEFINE_PREG(p7), + DEFINE_PREG(p8), + DEFINE_PREG(p9), + DEFINE_PREG(p10), + DEFINE_PREG(p11), + DEFINE_PREG(p12), + 
DEFINE_PREG(p13), + DEFINE_PREG(p14), + DEFINE_PREG(p15), + + // DEFINE FFR + DEFINE_PREG(ffr)}; + +#endif // DECLARE_REGISTER_INFOS_ARM64_SVE_STRUCT Index: lldb/source/Plugins/Process/Utility/lldb-arm64-register-enums.h =================================================================== --- lldb/source/Plugins/Process/Utility/lldb-arm64-register-enums.h +++ lldb/source/Plugins/Process/Utility/lldb-arm64-register-enums.h @@ -255,9 +255,66 @@ dbg_wcr14_arm64, dbg_wcr15_arm64, + k_first_sve_arm64, + sve_vg_arm64 = k_first_sve_arm64, + sve_z0_arm64, + sve_z1_arm64, + sve_z2_arm64, + sve_z3_arm64, + sve_z4_arm64, + sve_z5_arm64, + sve_z6_arm64, + sve_z7_arm64, + sve_z8_arm64, + sve_z9_arm64, + sve_z10_arm64, + sve_z11_arm64, + sve_z12_arm64, + sve_z13_arm64, + sve_z14_arm64, + sve_z15_arm64, + sve_z16_arm64, + sve_z17_arm64, + sve_z18_arm64, + sve_z19_arm64, + sve_z20_arm64, + sve_z21_arm64, + sve_z22_arm64, + sve_z23_arm64, + sve_z24_arm64, + sve_z25_arm64, + sve_z26_arm64, + sve_z27_arm64, + sve_z28_arm64, + sve_z29_arm64, + sve_z30_arm64, + sve_z31_arm64, + + sve_p0_arm64, + sve_p1_arm64, + sve_p2_arm64, + sve_p3_arm64, + sve_p4_arm64, + sve_p5_arm64, + sve_p6_arm64, + sve_p7_arm64, + sve_p8_arm64, + sve_p9_arm64, + sve_p10_arm64, + sve_p11_arm64, + sve_p12_arm64, + sve_p13_arm64, + sve_p14_arm64, + sve_p15_arm64, + + sve_ffr_arm64, + k_last_sve_arm64 = sve_ffr_arm64, + k_num_registers_arm64, k_num_gpr_registers_arm64 = k_last_gpr_arm64 - k_first_gpr_arm64 + 1, - k_num_fpr_registers_arm64 = k_last_fpr_arm64 - k_first_fpr_arm64 + 1 + k_num_fpr_registers_arm64 = k_last_fpr_arm64 - k_first_fpr_arm64 + 1, + k_num_sve_registers_arm64 = k_last_sve_arm64 - k_first_sve_arm64 + 1 + }; } Index: lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp =================================================================== --- lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp +++ lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp @@ -2048,7 +2048,7 @@ packet, "P packet missing '=' char after register number"); // Parse out the value. 
- uint8_t reg_bytes[256]; // big enough to support up to 256 byte AArch64 SVE + uint8_t reg_bytes[kMaxRegisterByteSize]; // big enough to support AArch64 SVE // registers size_t reg_size = packet.GetHexBytesAvail(reg_bytes); Index: lldb/source/Utility/ARM64_DWARF_Registers.h =================================================================== --- lldb/source/Utility/ARM64_DWARF_Registers.h +++ lldb/source/Utility/ARM64_DWARF_Registers.h @@ -51,7 +51,31 @@ sp = x31, pc = 32, cpsr = 33, - // 34-63 reserved + // 34-45 reserved + + // 64-bit SVE Vector granule pseudo register + vg = 46, + + // VG x 8-bit SVE first fault register + ffr = 47, + + // VG x 8-bit SVE predicate registers + p0 = 48, + p1, + p2, + p3, + p4, + p5, + p6, + p7, + p8, + p9, + p10, + p11, + p12, + p13, + p14, + p15, // V0-V31 (128 bit vector registers) v0 = 64, @@ -85,9 +109,41 @@ v28, v29, v30, - v31 + v31, - // 96-127 reserved + // VG x 64-bit SVE vector registers + z0 = 96, + z1, + z2, + z3, + z4, + z5, + z6, + z7, + z8, + z9, + z10, + z11, + z12, + z13, + z14, + z15, + z16, + z17, + z18, + z19, + z20, + z21, + z22, + z23, + z24, + z25, + z26, + z27, + z28, + z29, + z30, + z31 }; } // namespace arm64_dwarf Index: lldb/source/Utility/ARM64_ehframe_Registers.h =================================================================== --- lldb/source/Utility/ARM64_ehframe_Registers.h +++ lldb/source/Utility/ARM64_ehframe_Registers.h @@ -49,10 +49,34 @@ lr, // aka x30 sp, // aka x31 aka wzr pc, // value is 32 - cpsr -}; + cpsr, + // 34-45 reserved -enum { + // 64-bit SVE Vector granule pseudo register + vg = 46, + + // VG x 8-bit SVE first fault register + ffr = 47, + + // VG x 8-bit SVE predicate registers + p0 = 48, + p1, + p2, + p3, + p4, + p5, + p6, + p7, + p8, + p9, + p10, + p11, + p12, + p13, + p14, + p15, + + // V0-V31 (128 bit vector registers) v0 = 64, v1, v2, @@ -84,7 +108,41 @@ v28, v29, v30, - v31 // 95 + v31, + + // VG x 64-bit SVE vector registers + z0 = 96, + z1, + z2, + z3, + z4, + z5, + z6, + z7, + z8, + z9, + z10, + z11, + z12, + z13, + z14, + z15, + z16, + z17, + z18, + z19, + z20, + z21, + z22, + z23, + z24, + z25, + z26, + z27, + z28, + z29, + z30, + z31 }; } Index: lldb/test/API/commands/register/register/aarch64_sve_registers/Makefile =================================================================== --- /dev/null +++ lldb/test/API/commands/register/register/aarch64_sve_registers/Makefile @@ -0,0 +1,5 @@ +C_SOURCES := main.c + +CFLAGS_EXTRAS := -march=armv8-a+sve + +include Makefile.rules Index: lldb/test/API/commands/register/register/aarch64_sve_registers/TestSVERegisters.py =================================================================== --- /dev/null +++ lldb/test/API/commands/register/register/aarch64_sve_registers/TestSVERegisters.py @@ -0,0 +1,128 @@ +""" +Test the AArch64 SVE registers.
+""" + +import lldb +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + +class RegisterCommandsTestCase(TestBase): + + def check_sve_register_size(self, set, name, expected): + reg_value = set.GetChildMemberWithName(name) + self.assertTrue(reg_value.IsValid(), + 'Verify we have a register named "%s"' % (name)) + self.assertEqual(reg_value.GetByteSize(), expected, + 'Verify "%s" == %i' % (name, expected)) + + mydir = TestBase.compute_mydir(__file__) + @skipIf + def test_sve_registers_configuration(self): + """Test AArch64 SVE registers size configuration.""" + self.build() + self.line = line_number('main.c', '// Set a break point here.') + + exe = self.getBuildArtifact("a.out") + self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET) + + lldbutil.run_break_set_by_file_and_line(self, "main.c", self.line, num_expected_locations=1) + self.runCmd("run", RUN_SUCCEEDED) + + self.expect("thread backtrace", STOPPED_DUE_TO_BREAKPOINT, + substrs = ["stop reason = breakpoint 1."]) + + target = self.dbg.GetSelectedTarget() + process = target.GetProcess() + thread = process.GetThreadAtIndex(0) + currentFrame = thread.GetFrameAtIndex(0) + + has_sve = False + for registerSet in currentFrame.GetRegisters(): + if 'sve registers' in registerSet.GetName().lower(): + has_sve = True + + if not has_sve: + self.skipTest('SVE registers must be supported.') + + registerSets = process.GetThreadAtIndex(0).GetFrameAtIndex(0).GetRegisters() + + sve_registers = registerSets.GetValueAtIndex(2) + + vg_reg = sve_registers.GetChildMemberWithName("vg") + + vg_reg_value = vg_reg.GetValueAsUnsigned() + + z_reg_size = vg_reg_value * 8 + + p_reg_size = z_reg_size // 8 + + for i in range(32): + self.check_sve_register_size(sve_registers, 'z%i' % (i), z_reg_size) + + for i in range(16): + self.check_sve_register_size(sve_registers, 'p%i' % (i), p_reg_size) + + self.check_sve_register_size(sve_registers, 'ffr', p_reg_size) + + @no_debug_info_test + @skipIf + def test_sve_registers_read_write(self): + """Test AArch64 SVE registers read and write.""" + self.build() + self.line = line_number('main.c', '// Set a break point here.') + + exe = self.getBuildArtifact("a.out") + self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET) + + lldbutil.run_break_set_by_file_and_line(self, "main.c", self.line, num_expected_locations=1) + self.runCmd("run", RUN_SUCCEEDED) + + self.expect("thread backtrace", STOPPED_DUE_TO_BREAKPOINT, + substrs = ["stop reason = breakpoint 1."]) + + target = self.dbg.GetSelectedTarget() + process = target.GetProcess() + thread = process.GetThreadAtIndex(0) + currentFrame = thread.GetFrameAtIndex(0) + + has_sve = False + for registerSet in currentFrame.GetRegisters(): + if 'sve registers' in registerSet.GetName().lower(): + has_sve = True + + if not has_sve: + self.skipTest('SVE registers must be supported.') + + registerSets = process.GetThreadAtIndex(0).GetFrameAtIndex(0).GetRegisters() + + sve_registers = registerSets.GetValueAtIndex(2) + + vg_reg = sve_registers.GetChildMemberWithName("vg") + + vg_reg_value = vg_reg.GetValueAsUnsigned() + + z_reg_size = vg_reg_value * 8 + + p_reg_size = z_reg_size // 8 + + z_regs_value = '{' + ' '.join(('0x9d' for _ in range(z_reg_size))) + '}' + + p_regs_value = '{' + ' '.join(('0xee' for _ in range(p_reg_size))) + '}' + + for i in range(32): + self.runCmd('register write ' + 'z%i' % (i) + " '" + z_regs_value +
"'") + + for i in range(32): + self.expect("register read " + 'z%i' % (i), substrs = [z_regs_value]) + + for i in range(16): + self.runCmd('register write ' + 'p%i' % (i) + " '" + p_regs_value + "'") + + for i in range(16): + self.expect("register read " + 'p%i' % (i), substrs = [p_regs_value]) + + self.runCmd('register write ' + 'ffr ' + "'" + p_regs_value + "'") + + self.expect("register read " + 'ffr', substrs = [p_regs_value]) \ No newline at end of file Index: lldb/test/API/commands/register/register/aarch64_sve_registers/main.c =================================================================== --- /dev/null +++ lldb/test/API/commands/register/register/aarch64_sve_registers/main.c @@ -0,0 +1,5 @@ +int main() { + asm volatile("ptrue p0.s\n\t"); + asm volatile("fcpy z0.s, p0/m, #5.00000000\n\t"); + return 0; // Set a break point here. +} \ No newline at end of file