omjavaid updated this revision to Diff 303786.
omjavaid added a comment.

This update follows the removal of invalidate_regs from primary registers in 
D91057 <https://reviews.llvm.org/D91057>. Previously only the SVE Z registers 
had their invalidate_regs list populated; it has been removed so that less 
information is exchanged when transferring those registers.
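
For reference, the sizing rule this patch applies in
UpdateARM64SVERegistersInfos can be summarized with a minimal sketch
(illustrative only; the SVESizes/SizesForVG names are not part of the patch):

  #include <cstdint>

  // vg is the SVE vector length expressed in 64-bit granules.
  struct SVESizes {
    uint64_t z_bytes; // each Z register: vg * 8 bytes
    uint64_t p_bytes; // each P register and FFR: vg bytes (one bit per Z byte)
  };

  constexpr SVESizes SizesForVG(uint64_t vg) { return {vg * 8, vg}; }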


CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D82863/new/

https://reviews.llvm.org/D82863

Files:
  lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.cpp
  lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp
  lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.h
  lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.cpp.rej
  lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp
  lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h
  lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp

Index: lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
===================================================================
--- lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
+++ lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
@@ -1761,6 +1761,21 @@
         gdb_thread->PrivateSetRegisterValue(pair.first, buffer_sp->GetData());
       }
 
+      // The code below is specific to AArch64 targets in SVE state.
+      // If the expedited register set contains the vector granule (vg)
+      // register, reconfiguration of the thread's register context is
+      // triggered by calling AArch64SVEReconfigure.
+      const ArchSpec &arch = GetTarget().GetArchitecture();
+      if (arch.IsValid() && (arch.GetMachine() == llvm::Triple::aarch64 ||
+                             arch.GetMachine() == llvm::Triple::aarch64_be)) {
+        GDBRemoteRegisterContext *reg_ctx_sp =
+            static_cast<GDBRemoteRegisterContext *>(
+                gdb_thread->GetRegisterContext().get());
+
+        if (reg_ctx_sp)
+          reg_ctx_sp->AArch64SVEReconfigure();
+      }
+
       thread_sp->SetName(thread_name.empty() ? nullptr : thread_name.c_str());
 
       gdb_thread->SetThreadDispatchQAddr(thread_dispatch_qaddr);
Index: lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h
===================================================================
--- lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h
+++ lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h
@@ -40,6 +40,9 @@
 
   void HardcodeARMRegisters(bool from_scratch);
 
+  bool UpdateARM64SVERegistersInfos(uint64_t vg, uint32_t &end_reg_offset,
+                                    std::vector<uint32_t> &invalidate_regs);
+
   void CloneFrom(GDBRemoteDynamicRegisterInfoSP process_reginfo);
 };
 
@@ -79,6 +82,8 @@
   uint32_t ConvertRegisterKindToRegisterNumber(lldb::RegisterKind kind,
                                                uint32_t num) override;
 
+  bool AArch64SVEReconfigure();
+
 protected:
   friend class ThreadGDBRemote;
 
Index: lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp
===================================================================
--- lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp
+++ lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp
@@ -213,8 +213,8 @@
           for (int i = 0; i < regcount; i++) {
             struct RegisterInfo *reginfo =
                 m_reg_info_sp->GetRegisterInfoAtIndex(i);
-            if (reginfo->byte_offset + reginfo->byte_size 
-                   <= buffer_sp->GetByteSize()) {
+            if (reginfo->byte_offset + reginfo->byte_size <=
+                buffer_sp->GetByteSize()) {
               m_reg_valid[i] = true;
             } else {
               m_reg_valid[i] = false;
@@ -343,6 +343,17 @@
   if (dst == nullptr)
     return false;
 
+  // The code below is specific to AArch64 targets in SVE state.
+  // If the vector granule (vg) register is being written, the thread's
+  // register context is reconfigured once the write succeeds.
+  bool do_reconfigure_arm64_sve = false;
+  const ArchSpec &arch = process->GetTarget().GetArchitecture();
+  if (arch.IsValid() && (arch.GetMachine() == llvm::Triple::aarch64 ||
+                         arch.GetMachine() == llvm::Triple::aarch64_be)) {
+    if (strcmp(reg_info->name, "vg") == 0)
+      do_reconfigure_arm64_sve = true;
+  }
+
   if (data.CopyByteOrderedData(data_offset,                // src offset
                                reg_info->byte_size,        // src length
                                dst,                        // dst
@@ -362,6 +373,11 @@
 
         {
           SetAllRegisterValid(false);
+
+          if (do_reconfigure_arm64_sve &&
+              GetPrimordialRegister(reg_info, gdb_comm))
+            AArch64SVEReconfigure();
+
           return true;
         }
       } else {
@@ -390,6 +406,10 @@
         } else {
           // This is an actual register, write it
           success = SetPrimordialRegister(reg_info, gdb_comm);
+
+          if (success && do_reconfigure_arm64_sve &&
+              GetPrimordialRegister(reg_info, gdb_comm))
+            AArch64SVEReconfigure();
         }
 
         // Check if writing this register will invalidate any other register
@@ -655,9 +675,8 @@
       if (m_thread.GetProcess().get()) {
         const ArchSpec &arch =
             m_thread.GetProcess()->GetTarget().GetArchitecture();
-        if (arch.IsValid() && 
-            (arch.GetMachine() == llvm::Triple::aarch64 ||
-             arch.GetMachine() == llvm::Triple::aarch64_32) &&
+        if (arch.IsValid() && (arch.GetMachine() == llvm::Triple::aarch64 ||
+                               arch.GetMachine() == llvm::Triple::aarch64_32) &&
             arch.GetTriple().getVendor() == llvm::Triple::Apple &&
             arch.GetTriple().getOS() == llvm::Triple::IOS) {
           arm64_debugserver = true;
@@ -712,6 +731,79 @@
   return m_reg_info_sp->ConvertRegisterKindToRegisterNumber(kind, num);
 }
 
+bool GDBRemoteRegisterContext::AArch64SVEReconfigure() {
+  if (!m_reg_info_sp)
+    return false;
+
+  const RegisterInfo *reg_info = m_reg_info_sp->GetRegisterInfo("vg");
+  if (!reg_info)
+    return false;
+
+  uint64_t fail_value = LLDB_INVALID_ADDRESS;
+  uint32_t vg_reg_num = reg_info->kinds[eRegisterKindLLDB];
+  uint64_t vg_reg_value = ReadRegisterAsUnsigned(vg_reg_num, fail_value);
+
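+  // vg counts 64-bit granules; the architecture caps the SVE vector length
+  // at 2048 bits, so vg can be at most 32.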
+  if (vg_reg_value != fail_value && vg_reg_value <= 32) {
+    const RegisterInfo *reg_info = m_reg_info_sp->GetRegisterInfo("p0");
+    if (!reg_info || vg_reg_value == reg_info->byte_size)
+      return false;
+
+    uint32_t end_reg_offset = 0;
+    std::vector<uint32_t> invalidate_regs;
+    if (m_reg_info_sp->UpdateARM64SVERegistersInfos(
+            vg_reg_value, end_reg_offset, invalidate_regs)) {
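+      // Copy no more than the smaller of the old buffer and the new layout
+      // so that resizing in either direction stays within bounds.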
+      uint64_t bytes_to_copy = m_reg_data.GetByteSize();
+      if (end_reg_offset < bytes_to_copy)
+        bytes_to_copy = end_reg_offset;
+
+      // Make a heap-based buffer that is big enough to store all registers.
+      DataBufferSP reg_data_sp(new DataBufferHeap(end_reg_offset, 0));
+      m_reg_data.CopyData(0, bytes_to_copy, reg_data_sp->GetBytes());
+      m_reg_data.Clear();
+      m_reg_data.SetData(reg_data_sp);
+      m_reg_data.SetByteOrder(GetByteOrder());
+
+      for (auto &reg : invalidate_regs)
+        m_reg_valid[reg] = false;
+
+      return true;
+    }
+  }
+
+  return false;
+}
+
+bool GDBRemoteDynamicRegisterInfo::UpdateARM64SVERegistersInfos(
+    uint64_t vg, uint32_t &end_reg_offset,
+    std::vector<uint32_t> &invalidate_regs) {
+  // SVE Z register size is vg x 8 bytes.
+  uint32_t z_reg_byte_size = vg * 8;
+
+  for (auto &reg : m_regs) {
+    if (reg.value_regs == nullptr) {
+      if (reg.name[0] == 'z' && isdigit(reg.name[1])) {
+        reg.byte_size = z_reg_byte_size;
+        invalidate_regs.push_back(reg.kinds[eRegisterKindLLDB]);
+      } else if (reg.name[0] == 'p' && isdigit(reg.name[1])) {
+        reg.byte_size = vg;
+        invalidate_regs.push_back(reg.kinds[eRegisterKindLLDB]);
+      } else if (strcmp(reg.name, "ffr") == 0) {
+        reg.byte_size = vg;
+        invalidate_regs.push_back(reg.kinds[eRegisterKindLLDB]);
+      }
+      reg.byte_offset = end_reg_offset;
+      end_reg_offset += reg.byte_size;
+    }
+  }
+
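+  // Pseudo registers share the byte offset of their first value register.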
+  for (auto &reg : m_regs) {
+    if (reg.value_regs != nullptr)
+      reg.byte_offset = GetRegisterInfoAtIndex(reg.value_regs[0])->byte_offset;
+  }
+
+  return true;
+}
+
 void GDBRemoteDynamicRegisterInfo::CloneFrom(
     GDBRemoteDynamicRegisterInfoSP proc_reginfo) {
   m_regs = proc_reginfo->m_regs;
Index: lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.cpp.rej
===================================================================
--- /dev/null
+++ lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.cpp.rej
@@ -0,0 +1,137 @@
+--- lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.cpp
++++ lldb/source/Plugins/Process/Utility/RegisterInfoPOSIX_arm64.cpp
+@@ -270,60 +274,92 @@ RegisterInfoPOSIX_arm64::ConfigureVectorRegisterInfos(uint32_t sve_vq) {
+ 
+   m_vector_reg_vq = sve_vq;
+ 
+-  if (sve_vq == eVectorQuadwordAArch64) {
+-    m_register_info_count =
+-        static_cast<uint32_t>(sizeof(g_register_infos_arm64_le) /
+-                              sizeof(g_register_infos_arm64_le[0]));
+-    m_register_info_p = g_register_infos_arm64_le;
+-
+-    return m_vector_reg_vq;
+-  }
+-
+-  m_register_info_count =
+-      static_cast<uint32_t>(sizeof(g_register_infos_arm64_sve_le) /
+-                            sizeof(g_register_infos_arm64_sve_le[0]));
+-
+   std::vector<lldb_private::RegisterInfo> &reg_info_ref =
+       m_per_vq_reg_infos[sve_vq];
+ 
+   if (reg_info_ref.empty()) {
+-    reg_info_ref = llvm::makeArrayRef(g_register_infos_arm64_sve_le,
+-                                      m_register_info_count);
+ 
+-    uint32_t offset = SVE_REGS_DEFAULT_OFFSET_LINUX;
++    m_dynamic_reg_sets.clear();
++    uint32_t dyn_regset_index = 0;
++    if (sve_vq == eVectorQuadwordAArch64) {
++      reg_info_ref =
++          llvm::makeArrayRef(g_register_infos_arm64_le, fpu_fpcr + 1);
++
++      if (IsPAuthEnabled()) {
++        InsertPAuthRegisters(reg_info_ref, fpu_fpcr, dyn_regset_index);
++        dyn_regset_index++;
++      }
++    } else {
++      reg_info_ref =
++          llvm::makeArrayRef(g_register_infos_arm64_sve_le, sve_ffr + 1);
++
++      if (IsPAuthEnabled()) {
++        InsertPAuthRegisters(reg_info_ref, sve_ffr, dyn_regset_index);
++        dyn_regset_index++;
++      }
++
++      uint32_t offset = SVE_REGS_DEFAULT_OFFSET_LINUX;
++
++      reg_info_ref[sve_vg].byte_offset = offset;
++      offset += reg_info_ref[sve_vg].byte_size;
++
++      // Update Z registers size and offset
++      uint32_t s_reg_base = fpu_s0;
++      uint32_t d_reg_base = fpu_d0;
++      uint32_t v_reg_base = fpu_v0;
++      uint32_t z_reg_base = sve_z0;
++
++      for (uint32_t index = 0; index < 32; index++) {
++        reg_info_ref[s_reg_base + index].byte_offset = offset;
++        reg_info_ref[d_reg_base + index].byte_offset = offset;
++        reg_info_ref[v_reg_base + index].byte_offset = offset;
++        reg_info_ref[z_reg_base + index].byte_offset = offset;
++
++        reg_info_ref[z_reg_base + index].byte_size =
++            sve_vq * SVE_QUAD_WORD_BYTES;
++        offset += reg_info_ref[z_reg_base + index].byte_size;
++      }
++
++      // Update P registers and FFR size and offset
++      for (uint32_t it = sve_p0; it <= sve_ffr; it++) {
++        reg_info_ref[it].byte_offset = offset;
++        reg_info_ref[it].byte_size = sve_vq * SVE_QUAD_WORD_BYTES / 8;
++        offset += reg_info_ref[it].byte_size;
++      }
++
++      reg_info_ref[fpu_fpsr].byte_offset = offset;
++      reg_info_ref[fpu_fpcr].byte_offset = offset + 4;
++    }
+ 
+-    reg_info_ref[sve_vg].byte_offset = offset;
+-    offset += reg_info_ref[sve_vg].byte_size;
++    m_per_vq_reg_infos[sve_vq] = reg_info_ref;
++  }
+ 
+-    // Update Z registers size and offset
+-    uint32_t s_reg_base = fpu_s0;
+-    uint32_t d_reg_base = fpu_d0;
+-    uint32_t v_reg_base = fpu_v0;
+-    uint32_t z_reg_base = sve_z0;
++  m_register_info_p = reg_info_ref.data();
++  return m_vector_reg_vq;
++}
+ 
+-    for (uint32_t index = 0; index < 32; index++) {
+-      reg_info_ref[s_reg_base + index].byte_offset = offset;
+-      reg_info_ref[d_reg_base + index].byte_offset = offset;
+-      reg_info_ref[v_reg_base + index].byte_offset = offset;
+-      reg_info_ref[z_reg_base + index].byte_offset = offset;
++void RegisterInfoPOSIX_arm64::InsertPAuthRegisters(
++    std::vector<lldb_private::RegisterInfo> &reg_info_ref,
++    uint32_t start_reg_num, uint32_t dyn_regset_index) {
++  m_dynamic_reg_sets[dyn_regset_index] = g_reg_set_pauth_arm64;
+ 
+-      reg_info_ref[z_reg_base + index].byte_size = sve_vq * SVE_QUAD_WORD_BYTES;
+-      offset += reg_info_ref[z_reg_base + index].byte_size;
+-    }
++  m_dynamic_regset_regnums[dyn_regset_index] = {
++      start_reg_num + 1, start_reg_num + 2, LLDB_INVALID_REGNUM};
++  m_dynamic_reg_sets[dyn_regset_index].registers =
++      m_dynamic_regset_regnums[dyn_regset_index].data();
+ 
+-    // Update P registers and FFR size and offset
+-    for (uint32_t it = sve_p0; it <= sve_ffr; it++) {
+-      reg_info_ref[it].byte_offset = offset;
+-      reg_info_ref[it].byte_size = sve_vq * SVE_QUAD_WORD_BYTES / 8;
+-      offset += reg_info_ref[it].byte_size;
+-    }
++  reg_info_ref.push_back(g_register_infos_pauth[0]);
++  reg_info_ref.push_back(g_register_infos_pauth[1]);
+ 
+-    reg_info_ref[fpu_fpsr].byte_offset = offset;
+-    reg_info_ref[fpu_fpcr].byte_offset = offset + 4;
+-  }
++  reg_info_ref[start_reg_num + 1].kinds[lldb::eRegisterKindLLDB] =
++      start_reg_num + 1;
++  reg_info_ref[start_reg_num + 2].kinds[lldb::eRegisterKindLLDB] =
++      start_reg_num + 2;
++}
+ 
+-  m_register_info_p = reg_info_ref.data();
+-  return m_vector_reg_vq;
++bool RegisterInfoPOSIX_arm64::IsPAuthReg(unsigned reg) const {
++  return true; //(m_pauth_regnum_range.start >= reg && reg <=
++               // m_pauth_regnum_range.end);
+ }
+ 
+ bool RegisterInfoPOSIX_arm64::IsSVEZReg(unsigned reg) const {
Index: lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.h
===================================================================
--- lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.h
+++ lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.h
@@ -59,6 +59,9 @@
   uint32_t ConvertRegisterKindToRegisterNumber(uint32_t kind,
                                                uint32_t num) const;
 
+  const lldb_private::RegisterInfo *
+  GetRegisterInfo(llvm::StringRef reg_name) const;
+
   void Dump() const;
 
   void Clear();
@@ -76,9 +79,6 @@
   typedef std::vector<uint8_t> dwarf_opcode;
   typedef std::map<uint32_t, dwarf_opcode> dynamic_reg_size_map;
 
-  const lldb_private::RegisterInfo *
-  GetRegisterInfo(llvm::StringRef reg_name) const;
-
   void MoveFrom(DynamicRegisterInfo &&info);
 
   reg_collection m_regs;
Index: lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp
===================================================================
--- lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp
+++ lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp
@@ -470,6 +470,15 @@
       m_regs[i].value_regs = m_value_regs_map[i].data();
     else
       m_regs[i].value_regs = nullptr;
+
+    // Additionally, check for AArch64 SVE support to enable a per-thread
+    // register info object for dynamic register sizes.
+    if ((arch.GetMachine() == llvm::Triple::aarch64 ||
+         arch.GetMachine() == llvm::Triple::aarch64_be) &&
+        strcmp(m_regs[i].name, "vg") == 0) {
+      m_per_thread_reginfo = true;
+      break;
+    }
   }
 
   // Expand all invalidation dependencies
@@ -531,6 +540,29 @@
     }
   }
 
+  // On AArch64 with SVE support enabled we set offsets on the client side
+  // based on each register's size and its position in m_regs.
+  // All primary registers (i.e. reg.value_regs == nullptr) get a unique
+  // offset, and all pseudo registers share the offset of their value_regs[0].
+  if (arch.GetMachine() == llvm::Triple::aarch64 ||
+      arch.GetMachine() == llvm::Triple::aarch64_be) {
+    if (m_per_thread_reginfo) {
+      uint32_t reg_offset = 0;
+      for (auto &reg : m_regs) {
+        if (reg.value_regs == nullptr) {
+          reg.byte_offset = reg_offset;
+          reg_offset += reg.byte_size;
+        }
+      }
+
+      for (auto &reg : m_regs) {
+        if (reg.value_regs != nullptr)
+          reg.byte_offset =
+              GetRegisterInfoAtIndex(reg.value_regs[0])->byte_offset;
+      }
+    }
+  }
+
   if (!generic_regs_specified) {
     switch (arch.GetMachine()) {
     case llvm::Triple::aarch64:
@@ -682,6 +714,7 @@
   m_invalidate_regs_map.clear();
   m_dynamic_reg_size_map.clear();
   m_reg_data_byte_size = 0;
+  m_per_thread_reginfo = false;
   m_finalized = false;
 }
 
Index: lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.cpp
===================================================================
--- lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.cpp
+++ lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.cpp
@@ -299,14 +299,31 @@
     if (m_sve_state == SVEState::Disabled || m_sve_state == SVEState::Unknown)
       return Status("SVE disabled or not supported");
     else {
-      if (GetRegisterInfo().IsSVERegVG(reg))
-        return Status("SVE state change operation not supported");
-
       // Target has SVE enabled, we will read and cache SVE ptrace data
       error = ReadAllSVE();
       if (error.Fail())
         return error;
 
+      if (GetRegisterInfo().IsSVERegVG(reg)) {
+        uint64_t vg_value = reg_value.GetAsUInt64();
+
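+        // vg is expressed in 64-bit granules; sve_vl_valid() expects the
+        // vector length in bytes, hence vg_value * 8.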
+        if (sve_vl_valid(vg_value * 8)) {
+          if (m_sve_header_is_valid && vg_value == GetSVERegVG())
+            return error;
+
+          SetSVERegVG(vg_value);
+
+          error = WriteSVEHeader();
+          if (error.Success())
+            ConfigureRegisterContext();
+
+          if (m_sve_header_is_valid && vg_value == GetSVERegVG())
+            return error;
+        }
+
+        return Status("SVE vector length update failed.");
+      }
+
       // If target supports SVE but currently in FPSIMD mode.
       if (m_sve_state == SVEState::FPSIMD) {
         // Here we will check if writing this SVE register enables