omjavaid updated this revision to Diff 302794. omjavaid added a comment. This update tries to mitigate the effects of fixed offset fields in the LLDB register description. We have now replaced Arm64 register infos on the lldb-server side to generate offsets which increase with respect to size in increasing order of register numbers.
This patch updates functions where we update offsets to do the same thing on the client side: whenever offsets are updated we update the whole register infos list, adjusting any changes in sizes of Z and P registers in between. On the GDB side the register sequence starts with GPRs and then moves to SVE registers. GDB has composite types and does not use pseudo registers like LLDB does. Although we do not support composite register types in LLDB for now, as far as g packet sequencing and resize are concerned our handling no longer depends on any LLDB-specific register resize management. I have also updated uses of DWARF register numbers and replaced them with register names, because GDB/QEMU will not send DWARF register numbers as part of the target XML description. CHANGES SINCE LAST ACTION https://reviews.llvm.org/D82863/new/ https://reviews.llvm.org/D82863 Files: lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.cpp lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.h lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
Index: lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp =================================================================== --- lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp +++ lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp @@ -1761,6 +1761,21 @@ gdb_thread->PrivateSetRegisterValue(pair.first, buffer_sp->GetData()); } + // Code below is specific to AArch64 target in SVE state + // If expedited register set contains vector granule (vg) register + // then thread's register context reconfiguration is triggered by + // calling UpdateARM64SVERegistersInfos. + const ArchSpec &arch = GetTarget().GetArchitecture(); + if (arch.IsValid() && (arch.GetMachine() == llvm::Triple::aarch64 || + arch.GetMachine() == llvm::Triple::aarch64_be)) { + GDBRemoteRegisterContext *reg_ctx_sp = + static_cast<GDBRemoteRegisterContext *>( + gdb_thread->GetRegisterContext().get()); + + if (reg_ctx_sp) + reg_ctx_sp->AArch64SVEReconfigure(); + } + thread_sp->SetName(thread_name.empty() ? 
nullptr : thread_name.c_str()); gdb_thread->SetThreadDispatchQAddr(thread_dispatch_qaddr); Index: lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h =================================================================== --- lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h +++ lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h @@ -40,6 +40,9 @@ void HardcodeARMRegisters(bool from_scratch); + bool UpdateARM64SVERegistersInfos(uint64_t vg, uint32_t &end_reg_offset, + std::vector<uint32_t> &invalidate_regs); + void CloneFrom(GDBRemoteDynamicRegisterInfoSP process_reginfo); }; @@ -79,6 +82,8 @@ uint32_t ConvertRegisterKindToRegisterNumber(lldb::RegisterKind kind, uint32_t num) override; + bool AArch64SVEReconfigure(); + protected: friend class ThreadGDBRemote; Index: lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp =================================================================== --- lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp +++ lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp @@ -213,8 +213,8 @@ for (int i = 0; i < regcount; i++) { struct RegisterInfo *reginfo = m_reg_info_sp->GetRegisterInfoAtIndex(i); - if (reginfo->byte_offset + reginfo->byte_size - <= buffer_sp->GetByteSize()) { + if (reginfo->byte_offset + reginfo->byte_size <= + buffer_sp->GetByteSize()) { m_reg_valid[i] = true; } else { m_reg_valid[i] = false; @@ -343,6 +343,17 @@ if (dst == nullptr) return false; + // Code below is specific to AArch64 target in SVE state + // If vector granule (vg) register is being written then thread's + // register context reconfiguration is triggered on success. 
+ bool do_reconfigure_arm64_sve = false; + const ArchSpec &arch = process->GetTarget().GetArchitecture(); + if (arch.IsValid() && (arch.GetMachine() == llvm::Triple::aarch64 || + arch.GetMachine() == llvm::Triple::aarch64_be)) { + if (strcmp(reg_info->name, "vg") == 0) + do_reconfigure_arm64_sve = true; + } + if (data.CopyByteOrderedData(data_offset, // src offset reg_info->byte_size, // src length dst, // dst @@ -362,6 +373,11 @@ { SetAllRegisterValid(false); + + if (do_reconfigure_arm64_sve && + GetPrimordialRegister(reg_info, gdb_comm)) + AArch64SVEReconfigure(); + return true; } } else { @@ -390,6 +406,10 @@ } else { // This is an actual register, write it success = SetPrimordialRegister(reg_info, gdb_comm); + + if (success && do_reconfigure_arm64_sve && + GetPrimordialRegister(reg_info, gdb_comm)) + AArch64SVEReconfigure(); } // Check if writing this register will invalidate any other register @@ -655,9 +675,8 @@ if (m_thread.GetProcess().get()) { const ArchSpec &arch = m_thread.GetProcess()->GetTarget().GetArchitecture(); - if (arch.IsValid() && - (arch.GetMachine() == llvm::Triple::aarch64 || - arch.GetMachine() == llvm::Triple::aarch64_32) && + if (arch.IsValid() && (arch.GetMachine() == llvm::Triple::aarch64 || + arch.GetMachine() == llvm::Triple::aarch64_32) && arch.GetTriple().getVendor() == llvm::Triple::Apple && arch.GetTriple().getOS() == llvm::Triple::IOS) { arm64_debugserver = true; @@ -712,6 +731,82 @@ return m_reg_info_sp->ConvertRegisterKindToRegisterNumber(kind, num); } +bool GDBRemoteRegisterContext::AArch64SVEReconfigure(void) { + if (!m_reg_info_sp) + return false; + + const RegisterInfo *reg_info = m_reg_info_sp->GetRegisterInfo("vg"); + if (!reg_info) + return false; + + uint64_t fail_value = LLDB_INVALID_ADDRESS; + uint32_t vg_reg_num = reg_info->kinds[eRegisterKindLLDB]; + uint64_t vg_reg_value = ReadRegisterAsUnsigned(vg_reg_num, fail_value); + + if (vg_reg_value != fail_value && vg_reg_value <= 32) { + const RegisterInfo *reg_info = 
m_reg_info_sp->GetRegisterInfo("p0"); + if (!reg_info || vg_reg_value == reg_info->byte_size) + return false; + + uint32_t end_reg_offset = 0; + std::vector<uint32_t> invalidate_regs; + if (m_reg_info_sp->UpdateARM64SVERegistersInfos( + vg_reg_value, end_reg_offset, invalidate_regs)) { + uint64_t bytes_to_copy = m_reg_data.GetByteSize(); + if (end_reg_offset < bytes_to_copy) + bytes_to_copy = end_reg_offset; + + // Make a heap based buffer that is big enough to store all registers + DataBufferSP reg_data_sp(new DataBufferHeap(end_reg_offset, 0)); + m_reg_data.CopyData(0, bytes_to_copy, reg_data_sp->GetBytes()); + m_reg_data.Clear(); + m_reg_data.SetData(reg_data_sp); + m_reg_data.SetByteOrder(GetByteOrder()); + + for (auto &reg : invalidate_regs) + m_reg_valid[reg] = false; + + return true; + } + } + + return false; +} + +bool GDBRemoteDynamicRegisterInfo::UpdateARM64SVERegistersInfos( + uint64_t vg, uint32_t &end_reg_offset, + std::vector<uint32_t> &invalidate_regs) { + // SVE Z register size is vg x 8 bytes. 
+ uint32_t z_reg_byte_size = vg * 8; + + for (auto &reg : m_regs) { + if (reg.value_regs == nullptr) { + if (reg.name[0] == 'z' && isdigit(reg.name[1])) { + reg.byte_size = z_reg_byte_size; + invalidate_regs.push_back(reg.kinds[eRegisterKindLLDB]); + } else if (reg.name[0] == 'p' && isdigit(reg.name[1])) { + reg.byte_size = vg; + invalidate_regs.push_back(reg.kinds[eRegisterKindLLDB]); + } else if (strcmp(reg.name, "ffr") == 0) { + reg.byte_size = vg; + invalidate_regs.push_back(reg.kinds[eRegisterKindLLDB]); + } + + reg.byte_offset = end_reg_offset; + + if (reg.invalidate_regs) { + for (int i = 0; reg.invalidate_regs[i] != LLDB_INVALID_REGNUM; i++) + GetRegisterInfoAtIndex(reg.invalidate_regs[i])->byte_offset = + end_reg_offset; + } + + end_reg_offset += reg.byte_size; + } + } + + return true; +} + void GDBRemoteDynamicRegisterInfo::CloneFrom( GDBRemoteDynamicRegisterInfoSP proc_reginfo) { m_regs = proc_reginfo->m_regs; Index: lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.h =================================================================== --- lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.h +++ lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.h @@ -59,6 +59,9 @@ uint32_t ConvertRegisterKindToRegisterNumber(uint32_t kind, uint32_t num) const; + const lldb_private::RegisterInfo * + GetRegisterInfo(llvm::StringRef reg_name) const; + void Dump() const; void Clear(); @@ -76,9 +79,6 @@ typedef std::vector<uint8_t> dwarf_opcode; typedef std::map<uint32_t, dwarf_opcode> dynamic_reg_size_map; - const lldb_private::RegisterInfo * - GetRegisterInfo(llvm::StringRef reg_name) const; - void MoveFrom(DynamicRegisterInfo &&info); reg_collection m_regs; Index: lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp =================================================================== --- lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp +++ lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp @@ -531,6 +531,17 @@ } } + // Create per 
thread reginfo to support AArch64 SVE dynamic register sizes. + if (arch.GetMachine() == llvm::Triple::aarch64 || + arch.GetMachine() == llvm::Triple::aarch64_be) { + for (const auto &reg : m_regs) { + if (strcmp(reg.name, "vg") == 0) { + m_per_thread_reginfo = true; + break; + } + } + } + if (!generic_regs_specified) { switch (arch.GetMachine()) { case llvm::Triple::aarch64: @@ -682,6 +693,7 @@ m_invalidate_regs_map.clear(); m_dynamic_reg_size_map.clear(); m_reg_data_byte_size = 0; + m_per_thread_reginfo = false; m_finalized = false; } Index: lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.cpp =================================================================== --- lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.cpp +++ lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.cpp @@ -299,14 +299,31 @@ if (m_sve_state == SVEState::Disabled || m_sve_state == SVEState::Unknown) return Status("SVE disabled or not supported"); else { - if (GetRegisterInfo().IsSVERegVG(reg)) - return Status("SVE state change operation not supported"); - // Target has SVE enabled, we will read and cache SVE ptrace data error = ReadAllSVE(); if (error.Fail()) return error; + if (GetRegisterInfo().IsSVERegVG(reg)) { + uint64_t vg_value = reg_value.GetAsUInt64(); + + if (sve_vl_valid(vg_value * 8)) { + if (m_sve_header_is_valid && vg_value == GetSVERegVG()) + return error; + + SetSVERegVG(vg_value); + + error = WriteSVEHeader(); + if (error.Success()) + ConfigureRegisterContext(); + + if (m_sve_header_is_valid && vg_value == GetSVERegVG()) + return error; + } + + return Status("SVE vector length update failed."); + } + // If target supports SVE but currently in FPSIMD mode. if (m_sve_state == SVEState::FPSIMD) { // Here we will check if writing this SVE register enables
_______________________________________________ lldb-commits mailing list lldb-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits