Author: barsolo2000 Date: 2026-02-25T11:53:23-06:00 New Revision: 94d9f1b3cbb02700d9cd3339c1dbf44c0d13b550
URL: https://github.com/llvm/llvm-project/commit/94d9f1b3cbb02700d9cd3339c1dbf44c0d13b550 DIFF: https://github.com/llvm/llvm-project/commit/94d9f1b3cbb02700d9cd3339c1dbf44c0d13b550.diff LOG: [lldb] Batch breakpoint step-over for threads stopped at the same site (re-land) (#182944) Re-land https://github.com/llvm/llvm-project/pull/180101 since it was reverted here https://github.com/llvm/llvm-project/pull/182431 because of a flaky test. This PR includes the modified test that should pass from https://github.com/llvm/llvm-project/pull/182415 : When multiple threads are stopped at the same breakpoint, LLDB currently steps each thread over the breakpoint one at a time. Each step requires disabling the breakpoint, single-stepping one thread, and re-enabling it, resulting in N disable/enable cycles and N individual vCont packets for N threads. This is a common scenario for hot breakpoints in multithreaded programs and scales poorly. This patch batches the step-over so that all threads at the same breakpoint site are stepped together in a single vCont packet, with the breakpoint disabled once at the start and re-enabled once after the last thread finishes. At the top of WillResume, any leftover StepOverBreakpoint plans from a previous cycle are popped with their re-enable side effect suppressed via SetReenabledBreakpointSite, giving a clean slate. SetupToStepOverBreakpointIfNeeded then creates fresh plans for all threads that still need to step over a breakpoint, and these are grouped by breakpoint address. For groups with multiple threads, each plan is set to defer its re-enable through SetDeferReenableBreakpointSite. Instead of re-enabling the breakpoint directly when a plan completes, it calls ThreadFinishedSteppingOverBreakpoint, which decrements a per-address tracking count. The breakpoint is only re-enabled when the count reaches zero. All threads in the largest group are resumed together in a single batched vCont packet. 
If some threads don't complete their step in one cycle, the pop-and-recreate logic naturally re-batches the remaining threads on the next WillResume call. For 10 threads at the same breakpoint, this reduces the operation from 10 z0/Z0 pairs and 10 vCont packets to 1 z0 + 1 Z0 and a few progressively smaller batched vCont packets. Co-authored-by: Bar Soloveychik <[email protected]> Added: lldb/test/API/functionalities/gdb_remote_client/TestBatchedBreakpointStepOver.py lldb/test/API/functionalities/thread/concurrent_events/TestConcurrentBatchedBreakpointStepOver.py Modified: lldb/include/lldb/Target/ThreadList.h lldb/include/lldb/Target/ThreadPlanStepOverBreakpoint.h lldb/source/Target/ThreadList.cpp lldb/source/Target/ThreadPlanStepOverBreakpoint.cpp Removed: ################################################################################ diff --git a/lldb/include/lldb/Target/ThreadList.h b/lldb/include/lldb/Target/ThreadList.h index c108962003598..6920cefc20fd9 100644 --- a/lldb/include/lldb/Target/ThreadList.h +++ b/lldb/include/lldb/Target/ThreadList.h @@ -18,6 +18,9 @@ #include "lldb/Utility/UserID.h" #include "lldb/lldb-private.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/DenseSet.h" + namespace lldb_private { // This is a thread list with lots of functionality for use only by the process @@ -141,6 +144,19 @@ class ThreadList : public ThreadCollection { /// Precondition: both thread lists must be belong to the same process. void Update(ThreadList &rhs); + /// Called by ThreadPlanStepOverBreakpoint when a thread finishes stepping + /// over a breakpoint. This tracks which threads are still stepping over + /// each breakpoint address, and only re-enables the breakpoint when ALL + /// threads have finished stepping over it. + void ThreadFinishedSteppingOverBreakpoint(lldb::addr_t breakpoint_addr, + lldb::tid_t tid); + + /// Register a thread that is about to step over a breakpoint. 
+ /// The breakpoint will be re-enabled only after all registered threads + /// have called ThreadFinishedSteppingOverBreakpoint. + void RegisterThreadSteppingOverBreakpoint(lldb::addr_t breakpoint_addr, + lldb::tid_t tid); + protected: void SetShouldReportStop(Vote vote); @@ -154,6 +170,13 @@ class ThreadList : public ThreadCollection { m_selected_tid; ///< For targets that need the notion of a current thread. std::vector<lldb::tid_t> m_expression_tid_stack; + /// Tracks which threads are currently stepping over each breakpoint address. + /// Key: breakpoint address, Value: set of thread IDs stepping over it. + /// When a thread finishes stepping, it's removed from the set. When the set + /// becomes empty, the breakpoint is re-enabled. + llvm::DenseMap<lldb::addr_t, llvm::DenseSet<lldb::tid_t>> + m_threads_stepping_over_bp; + private: ThreadList() = delete; }; diff --git a/lldb/include/lldb/Target/ThreadPlanStepOverBreakpoint.h b/lldb/include/lldb/Target/ThreadPlanStepOverBreakpoint.h index 0da8dbf44ffd8..6537ac91af449 100644 --- a/lldb/include/lldb/Target/ThreadPlanStepOverBreakpoint.h +++ b/lldb/include/lldb/Target/ThreadPlanStepOverBreakpoint.h @@ -36,6 +36,24 @@ class ThreadPlanStepOverBreakpoint : public ThreadPlan { lldb::addr_t GetBreakpointLoadAddress() const { return m_breakpoint_addr; } + /// When set to true, the breakpoint site will NOT be re-enabled directly + /// by this plan. Instead, the plan will call + /// ThreadList::ThreadFinishedSteppingOverBreakpoint() when it completes, + /// allowing ThreadList to track all threads stepping over the same + /// breakpoint and only re-enable it when ALL threads have finished. + void SetDeferReenableBreakpointSite(bool defer) { + m_defer_reenable_breakpoint_site = defer; + } + + bool GetDeferReenableBreakpointSite() const { + return m_defer_reenable_breakpoint_site; + } + + /// Mark the breakpoint site as already re-enabled, suppressing any + /// re-enable in DidPop()/ThreadDestroyed(). 
Used when discarding plans + /// during WillResume cleanup to avoid spurious breakpoint toggles. + void SetReenabledBreakpointSite() { m_reenabled_breakpoint_site = true; } + protected: bool DoPlanExplainsStop(Event *event_ptr) override; bool DoWillResume(lldb::StateType resume_state, bool current_plan) override; @@ -47,6 +65,7 @@ class ThreadPlanStepOverBreakpoint : public ThreadPlan { lldb::user_id_t m_breakpoint_site_id; bool m_auto_continue; bool m_reenabled_breakpoint_site; + bool m_defer_reenable_breakpoint_site; ThreadPlanStepOverBreakpoint(const ThreadPlanStepOverBreakpoint &) = delete; const ThreadPlanStepOverBreakpoint & diff --git a/lldb/source/Target/ThreadList.cpp b/lldb/source/Target/ThreadList.cpp index 77a1c40b95f70..eb69bd038cc42 100644 --- a/lldb/source/Target/ThreadList.cpp +++ b/lldb/source/Target/ThreadList.cpp @@ -15,10 +15,13 @@ #include "lldb/Target/Thread.h" #include "lldb/Target/ThreadList.h" #include "lldb/Target/ThreadPlan.h" +#include "lldb/Target/ThreadPlanStepOverBreakpoint.h" #include "lldb/Utility/LLDBAssert.h" #include "lldb/Utility/LLDBLog.h" #include "lldb/Utility/Log.h" #include "lldb/Utility/State.h" +#include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/SmallVector.h" using namespace lldb; using namespace lldb_private; @@ -504,9 +507,30 @@ bool ThreadList::WillResume(RunDirection &direction) { collection::iterator pos, end = m_threads.end(); + // Clear tracking state from the previous stop and pop any leftover + // StepOverBreakpoint plans. This gives us a clean slate: plans will be + // recreated fresh by SetupToStepOverBreakpointIfNeeded below, and the + // batching logic will recompute deferred state from scratch. 
+ m_threads_stepping_over_bp.clear(); + for (const auto &thread_sp : m_threads) { + ThreadPlan *plan = thread_sp->GetCurrentPlan(); + if (plan && plan->GetKind() == ThreadPlan::eKindStepOverBreakpoint) { + // Suppress the re-enable side effect in DidPop() — the breakpoint + // may still be disabled from the previous batch, and we don't want + // to toggle it. The new plans will handle disable/re-enable correctly. + static_cast<ThreadPlanStepOverBreakpoint *>(plan) + ->SetReenabledBreakpointSite(); + thread_sp->DiscardPlan(); + } + } + // Go through the threads and see if any thread wants to run just itself. // if so then pick one and run it. + // Collect threads for batched vCont for multiple threads at the same + // breakpoint. + llvm::SmallVector<ThreadSP> batched_step_threads; + ThreadList run_me_only_list(m_process); run_me_only_list.SetStopID(m_process.GetStopID()); @@ -576,6 +600,13 @@ bool ThreadList::WillResume(RunDirection &direction) { assert(thread_to_run->GetCurrentPlan()->GetDirection() == direction); } } else { + // Pre-scan to find all threads that need to step over a breakpoint, + // and group them by breakpoint address. This optimization allows us to + // step multiple threads over the same breakpoint with minimal breakpoint + // swaps, only the last thread in each group will re-enable the breakpoint. + llvm::DenseMap<lldb::addr_t, llvm::SmallVector<ThreadSP>> breakpoint_groups; + bool found_run_before_public_stop = false; + for (pos = m_threads.begin(); pos != end; ++pos) { ThreadSP thread_sp(*pos); if (thread_sp->GetResumeState() != eStateSuspended) { @@ -589,14 +620,69 @@ bool ThreadList::WillResume(RunDirection &direction) { assert(thread_sp->GetCurrentPlan()->GetDirection() == direction); // You can't say "stop others" and also want yourself to be suspended. assert(thread_sp->GetCurrentPlan()->RunState() != eStateSuspended); + + // Get the breakpoint address from the step-over-breakpoint plan. 
+ ThreadPlan *current_plan = thread_sp->GetCurrentPlan(); + if (current_plan && + current_plan->GetKind() == ThreadPlan::eKindStepOverBreakpoint) { + ThreadPlanStepOverBreakpoint *bp_plan = + static_cast<ThreadPlanStepOverBreakpoint *>(current_plan); + lldb::addr_t bp_addr = bp_plan->GetBreakpointLoadAddress(); + breakpoint_groups[bp_addr].push_back(thread_sp); + } + thread_to_run = thread_sp; if (thread_sp->ShouldRunBeforePublicStop()) { // This takes precedence, so if we find one of these, service it: + found_run_before_public_stop = true; break; } } } } + + // Only apply batching optimization if we have a complete picture of + // breakpoint groups. If a ShouldRunBeforePublicStop thread caused the + // scan to exit early, the groups are incomplete and the priority thread + // must run solo. Deferred state will be cleaned up on next WillResume(). + if (!found_run_before_public_stop) { + // For each group of threads at the same breakpoint, register them with + // ThreadList and set them to use deferred re-enable. The breakpoint will + // only be re-enabled when ALL threads have finished stepping over it. + // Also collect threads for batched vCont if multiple threads at same BP. + for (auto &group : breakpoint_groups) { + lldb::addr_t bp_addr = group.first; + llvm::SmallVector<ThreadSP> &threads = group.second; + + if (threads.size() > 1) { + // Use tracking since multiple threads are stepping over the same + // breakpoint. + for (ThreadSP &thread_sp : threads) { + // Register this thread as stepping over the breakpoint. + RegisterThreadSteppingOverBreakpoint(bp_addr, thread_sp->GetID()); + + // Set the plan to defer re-enabling (use callback instead). + ThreadPlan *plan = thread_sp->GetCurrentPlan(); + // Verify the plan is actually a StepOverBreakpoint plan. 
+ if (plan && + plan->GetKind() == ThreadPlan::eKindStepOverBreakpoint) { + ThreadPlanStepOverBreakpoint *bp_plan = + static_cast<ThreadPlanStepOverBreakpoint *>(plan); + bp_plan->SetDeferReenableBreakpointSite(true); + } + } + + // Pick the largest group for batched vCont. + if (threads.size() > batched_step_threads.size()) + batched_step_threads = threads; + } + // Keeps default behavior for a single thread at breakpoint. + } + + // If we found a batch, use the first thread as thread_to_run. + if (!batched_step_threads.empty()) + thread_to_run = batched_step_threads[0]; + } } if (thread_to_run != nullptr) { @@ -615,7 +701,24 @@ bool ThreadList::WillResume(RunDirection &direction) { bool need_to_resume = true; - if (thread_to_run == nullptr) { + if (!batched_step_threads.empty()) { + // Batched stepping: all threads in the batch step together, + // all other threads stay suspended. + llvm::DenseSet<lldb::tid_t> batch_tids; + for (ThreadSP &thread_sp : batched_step_threads) + batch_tids.insert(thread_sp->GetID()); + + for (const auto &thread_sp : m_threads) { + if (batch_tids.count(thread_sp->GetID()) > 0) { + // This thread is in the batch, let it step. + if (!thread_sp->ShouldResume(thread_sp->GetCurrentPlan()->RunState())) + need_to_resume = false; + } else { + // Suspend it since it's not in the batch. 
+ thread_sp->ShouldResume(eStateSuspended); + } + } + } else if (thread_to_run == nullptr) { // Everybody runs as they wish: for (pos = m_threads.begin(); pos != end; ++pos) { ThreadSP thread_sp(*pos); @@ -801,3 +904,63 @@ ThreadList::ExpressionExecutionThreadPusher::ExpressionExecutionThreadPusher( m_thread_list->PushExpressionExecutionThread(m_tid); } } + +void ThreadList::RegisterThreadSteppingOverBreakpoint(addr_t breakpoint_addr, + tid_t tid) { + std::lock_guard<std::recursive_mutex> guard(GetMutex()); + m_threads_stepping_over_bp[breakpoint_addr].insert(tid); + + Log *log = GetLog(LLDBLog::Step); + LLDB_LOGF( + log, + "ThreadList::%s: Registered thread 0x%" PRIx64 + " stepping over breakpoint at 0x%" PRIx64 " (now %zu threads)", + __FUNCTION__, tid, breakpoint_addr, + static_cast<size_t>(m_threads_stepping_over_bp[breakpoint_addr].size())); +} + +void ThreadList::ThreadFinishedSteppingOverBreakpoint(addr_t breakpoint_addr, + tid_t tid) { + std::lock_guard<std::recursive_mutex> guard(GetMutex()); + + Log *log = GetLog(LLDBLog::Step); + + auto it = m_threads_stepping_over_bp.find(breakpoint_addr); + if (it == m_threads_stepping_over_bp.end()) { + // No threads registered for this breakpoint, re-enable directly. + LLDB_LOGF(log, + "ThreadList::%s: Thread 0x%" PRIx64 + " finished stepping over breakpoint at 0x%" PRIx64 + " but no threads were registered, re-enabling directly", + __FUNCTION__, tid, breakpoint_addr); + if (BreakpointSiteSP bp_site_sp = + m_process.GetBreakpointSiteList().FindByAddress(breakpoint_addr)) + m_process.EnableBreakpointSite(bp_site_sp.get()); + return; + } + + // Remove this thread from the set. + it->second.erase(tid); + + LLDB_LOGF(log, + "ThreadList::%s: Thread 0x%" PRIx64 + " finished stepping over breakpoint at 0x%" PRIx64 + " (%zu threads remaining)", + __FUNCTION__, tid, breakpoint_addr, + static_cast<size_t>(it->second.size())); + + // If no more threads are stepping over this breakpoint, re-enable it. 
+ if (it->second.empty()) { + LLDB_LOGF(log, + "ThreadList::%s: All threads finished stepping over breakpoint " + "at 0x%" PRIx64 ", re-enabling breakpoint", + __FUNCTION__, breakpoint_addr); + + if (BreakpointSiteSP bp_site_sp = + m_process.GetBreakpointSiteList().FindByAddress(breakpoint_addr)) + m_process.EnableBreakpointSite(bp_site_sp.get()); + + // Clean up the entry. + m_threads_stepping_over_bp.erase(it); + } +} diff --git a/lldb/source/Target/ThreadPlanStepOverBreakpoint.cpp b/lldb/source/Target/ThreadPlanStepOverBreakpoint.cpp index 3602527a9231b..8b58ae541f368 100644 --- a/lldb/source/Target/ThreadPlanStepOverBreakpoint.cpp +++ b/lldb/source/Target/ThreadPlanStepOverBreakpoint.cpp @@ -10,6 +10,7 @@ #include "lldb/Target/Process.h" #include "lldb/Target/RegisterContext.h" +#include "lldb/Target/ThreadList.h" #include "lldb/Utility/LLDBLog.h" #include "lldb/Utility/Log.h" #include "lldb/Utility/Stream.h" @@ -21,14 +22,14 @@ using namespace lldb_private; // the pc. ThreadPlanStepOverBreakpoint::ThreadPlanStepOverBreakpoint(Thread &thread) - : ThreadPlan( - ThreadPlan::eKindStepOverBreakpoint, "Step over breakpoint trap", - thread, eVoteNo, - eVoteNoOpinion), // We need to report the run since this happens - // first in the thread plan stack when stepping over - // a breakpoint - m_breakpoint_addr(LLDB_INVALID_ADDRESS), - m_auto_continue(false), m_reenabled_breakpoint_site(false) + : ThreadPlan(ThreadPlan::eKindStepOverBreakpoint, + "Step over breakpoint trap", thread, eVoteNo, + eVoteNoOpinion), // We need to report the run since this + // happens first in the thread plan stack when + // stepping over a breakpoint + m_breakpoint_addr(LLDB_INVALID_ADDRESS), m_auto_continue(false), + m_reenabled_breakpoint_site(false), + m_defer_reenable_breakpoint_site(false) { m_breakpoint_addr = thread.GetRegisterContext()->GetPC(); @@ -155,10 +156,18 @@ bool ThreadPlanStepOverBreakpoint::MischiefManaged() { void ThreadPlanStepOverBreakpoint::ReenableBreakpointSite() { if 
(!m_reenabled_breakpoint_site) { m_reenabled_breakpoint_site = true; - BreakpointSiteSP bp_site_sp( - m_process.GetBreakpointSiteList().FindByAddress(m_breakpoint_addr)); - if (bp_site_sp) { - m_process.EnableBreakpointSite(bp_site_sp.get()); + + if (m_defer_reenable_breakpoint_site) { + // Let ThreadList track all threads stepping over this breakpoint. + // It will re-enable the breakpoint only when ALL threads have finished. + m_process.GetThreadList().ThreadFinishedSteppingOverBreakpoint( + m_breakpoint_addr, GetThread().GetID()); + } else { + // Default behavior: re-enable the breakpoint directly. + if (BreakpointSiteSP bp_site_sp = + m_process.GetBreakpointSiteList().FindByAddress( + m_breakpoint_addr)) + m_process.EnableBreakpointSite(bp_site_sp.get()); } } } diff --git a/lldb/test/API/functionalities/gdb_remote_client/TestBatchedBreakpointStepOver.py b/lldb/test/API/functionalities/gdb_remote_client/TestBatchedBreakpointStepOver.py new file mode 100644 index 0000000000000..953294a77f658 --- /dev/null +++ b/lldb/test/API/functionalities/gdb_remote_client/TestBatchedBreakpointStepOver.py @@ -0,0 +1,216 @@ +""" +Test that when multiple threads are stopped at the same breakpoint, LLDB sends +a batched vCont with multiple step actions and only one breakpoint disable/ +re-enable pair, rather than stepping each thread individually with repeated +breakpoint toggles. + +Uses a mock GDB server to directly verify the packets LLDB sends. +""" + +import re + +import lldb +from lldbsuite.test.lldbtest import * +from lldbsuite.test.decorators import * +from lldbsuite.test.gdbclientutils import * +from lldbsuite.test.lldbgdbclient import GDBRemoteTestBase + + +class TestBatchedBreakpointStepOver(GDBRemoteTestBase): + @skipIfXmlSupportMissing + def test(self): + BP_ADDR = 0x0000000000401020 + # PC after stepping past the breakpoint instruction. 
+ STEPPED_PC = BP_ADDR + 1 + NUM_THREADS = 10 + TIDS = [0x101 + i for i in range(NUM_THREADS)] + + class MyResponder(MockGDBServerResponder): + def __init__(self): + MockGDBServerResponder.__init__(self) + self.resume_count = 0 + # Track which threads have completed their step. + self.stepped_threads = set() + + def qSupported(self, client_supported): + return ( + "PacketSize=3fff;QStartNoAckMode+;" + "qXfer:features:read+;swbreak+;hwbreak+" + ) + + def qfThreadInfo(self): + return "m" + ",".join("{:x}".format(t) for t in TIDS) + + def qsThreadInfo(self): + return "l" + + def haltReason(self): + # All threads stopped at the breakpoint address. + threads_str = ",".join("{:x}".format(t) for t in TIDS) + pcs_str = ",".join("{:x}".format(BP_ADDR) for _ in TIDS) + return "T05thread:{:x};threads:{};thread-pcs:{};" "swbreak:;".format( + TIDS[0], threads_str, pcs_str + ) + + def threadStopInfo(self, threadnum): + threads_str = ",".join("{:x}".format(t) for t in TIDS) + pcs_str = ",".join("{:x}".format(BP_ADDR) for _ in TIDS) + return "T05thread:{:x};threads:{};thread-pcs:{};" "swbreak:;".format( + threadnum, threads_str, pcs_str + ) + + def setBreakpoint(self, packet): + return "OK" + + def readRegisters(self): + return "00" * 160 + + def readRegister(self, regno): + return "00" * 8 + + def qXferRead(self, obj, annex, offset, length): + if annex == "target.xml": + return ( + """<?xml version="1.0"?> + <target version="1.0"> + <architecture>i386:x86-64</architecture> + <feature name="org.gnu.gdb.i386.core"> + <reg name="rax" bitsize="64" regnum="0" type="int" group="general"/> + <reg name="rbx" bitsize="64" regnum="1" type="int" group="general"/> + <reg name="rcx" bitsize="64" regnum="2" type="int" group="general"/> + <reg name="rdx" bitsize="64" regnum="3" type="int" group="general"/> + <reg name="rsi" bitsize="64" regnum="4" type="int" group="general"/> + <reg name="rdi" bitsize="64" regnum="5" type="int" group="general"/> + <reg name="rbp" bitsize="64" regnum="6" 
type="data_ptr" group="general"/> + <reg name="rsp" bitsize="64" regnum="7" type="data_ptr" group="general"/> + <reg name="r8" bitsize="64" regnum="8" type="int" group="general"/> + <reg name="r9" bitsize="64" regnum="9" type="int" group="general"/> + <reg name="r10" bitsize="64" regnum="10" type="int" group="general"/> + <reg name="r11" bitsize="64" regnum="11" type="int" group="general"/> + <reg name="r12" bitsize="64" regnum="12" type="int" group="general"/> + <reg name="r13" bitsize="64" regnum="13" type="int" group="general"/> + <reg name="r14" bitsize="64" regnum="14" type="int" group="general"/> + <reg name="r15" bitsize="64" regnum="15" type="int" group="general"/> + <reg name="rip" bitsize="64" regnum="16" type="code_ptr" group="general"/> + <reg name="eflags" bitsize="32" regnum="17" type="int" group="general"/> + <reg name="cs" bitsize="32" regnum="18" type="int" group="general"/> + <reg name="ss" bitsize="32" regnum="19" type="int" group="general"/> + </feature> + </target>""", + False, + ) + return None, False + + def other(self, packet): + if packet == "vCont?": + return "vCont;c;C;s;S" + if packet.startswith("vCont;"): + return self._handle_vCont(packet) + if packet.startswith("z"): + return "OK" + return "" + + def _handle_vCont(self, packet): + self.resume_count += 1 + # Parse step actions from vCont. + stepping_tids = [] + for action in packet[6:].split(";"): + if not action: + continue + if action.startswith("s:"): + tid_str = action[2:] + if "." in tid_str: + tid_str = tid_str.split(".")[1] + stepping_tids.append(int(tid_str, 16)) + + # All stepping threads complete their step. + for tid in stepping_tids: + self.stepped_threads.add(tid) + + all_done = self.stepped_threads >= set(TIDS) + + # Report stop, use the first stepping thread as the reporter. + report_tid = stepping_tids[0] if stepping_tids else TIDS[0] + threads_str = ",".join("{:x}".format(t) for t in TIDS) + if all_done: + # All threads moved past breakpoint. 
+ pcs_str = ",".join("{:x}".format(STEPPED_PC) for _ in TIDS) + else: + # Stepped threads moved, others still at breakpoint. + pcs_str = ",".join( + "{:x}".format( + STEPPED_PC if t in self.stepped_threads else BP_ADDR + ) + for t in TIDS + ) + return "T05thread:{:x};threads:{};thread-pcs:{};".format( + report_tid, threads_str, pcs_str + ) + + self.server.responder = MyResponder() + self.runCmd("platform select remote-linux") + target = self.createTarget("a.yaml") + process = self.connect(target) + + self.assertEqual(process.GetNumThreads(), NUM_THREADS) + + # Set a breakpoint at BP_ADDR, all threads are already stopped there. + bkpt = target.BreakpointCreateByAddress(BP_ADDR) + self.assertTrue(bkpt.IsValid()) + + # Continue, LLDB should step all threads over the breakpoint. + process.Continue() + + # Collect packets from the log. + received = self.server.responder.packetLog.get_received() + + bp_addr_hex = "{:x}".format(BP_ADDR) + + # Count z0 (disable) and Z0 (enable) packets for our breakpoint. + z0_packets = [] + Z0_packets = [] + vcont_step_packets = [] + + for pkt in received: + if pkt.startswith("z0,{},".format(bp_addr_hex)): + z0_packets.append(pkt) + elif pkt.startswith("Z0,{},".format(bp_addr_hex)): + Z0_packets.append(pkt) + elif pkt.startswith("vCont;"): + step_count = len(re.findall(r";s:", pkt)) + if step_count > 0: + vcont_step_packets.append((step_count, pkt)) + + # Verify: exactly 1 breakpoint disable (z0) + self.assertEqual( + len(z0_packets), + 1, + "Expected 1 z0 (disable) packet, got {}: {}".format( + len(z0_packets), z0_packets + ), + ) + + # The initial Z0 is the breakpoint set. After stepping, there should + # be exactly 1 re-enable Z0 (total Z0 count = 2: set + re-enable). + # But we set the breakpoint via SB API, so count Z0 packets with + # our address, initial set + 1 re-enable = 2. 
+ self.assertEqual( + len(Z0_packets), + 2, + "Expected 2 Z0 packets (1 set + 1 re-enable), got {}: {}".format( + len(Z0_packets), Z0_packets + ), + ) + + # At least one batched vCont with multiple step actions. + max_batch = max((count for count, _ in vcont_step_packets), default=0) + self.assertGreaterEqual( + max_batch, + NUM_THREADS, + "Expected a vCont with {} step actions (batched), " + "but max was {}. Packets: {}".format( + NUM_THREADS, + max_batch, + [(c, p) for c, p in vcont_step_packets], + ), + ) diff --git a/lldb/test/API/functionalities/thread/concurrent_events/TestConcurrentBatchedBreakpointStepOver.py b/lldb/test/API/functionalities/thread/concurrent_events/TestConcurrentBatchedBreakpointStepOver.py new file mode 100644 index 0000000000000..4f480e64a9dbd --- /dev/null +++ b/lldb/test/API/functionalities/thread/concurrent_events/TestConcurrentBatchedBreakpointStepOver.py @@ -0,0 +1,127 @@ +""" +Test that the batched breakpoint step-over optimization activates when +multiple threads hit the same breakpoint. Verifies that the optimization +reduces breakpoint toggle operations compared to stepping one at a time. +""" + +import os +import re + +from lldbsuite.test.decorators import * +from lldbsuite.test.concurrent_base import ConcurrentEventsBase +from lldbsuite.test.lldbtest import TestBase + + +@skipIfWindows +class ConcurrentBatchedBreakpointStepOver(ConcurrentEventsBase): + @skipIf(triple="^mips") + @skipIf(archs=["aarch64"]) + def test(self): + """Test that batched breakpoint step-over reduces breakpoint + toggle operations when multiple threads hit the same breakpoint.""" + self.build() + + num_threads = 10 + + # Enable logging to capture optimization messages and GDB packets. 
+ lldb_logfile = self.getBuildArtifact("lldb-log.txt") + self.runCmd("log enable lldb step break -f {}".format(lldb_logfile)) + + gdb_logfile = self.getBuildArtifact("gdb-remote-log.txt") + self.runCmd("log enable gdb-remote packets -f {}".format(gdb_logfile)) + + # Run with breakpoint threads. + self.do_thread_actions(num_breakpoint_threads=num_threads) + + self.assertTrue(os.path.isfile(lldb_logfile), "lldb log file not found") + with open(lldb_logfile, "r") as f: + lldb_log = f.read() + + # Verify the optimization activated by looking for "Registered thread" + # messages, which indicate threads were grouped for batching. + registered_matches = re.findall( + r"Registered thread 0x[0-9a-fA-F]+ stepping over " + r"breakpoint at (0x[0-9a-fA-F]+)", + lldb_log, + ) + self.assertGreater( + len(registered_matches), + 0, + "Expected batched breakpoint step-over optimization to be " + "used (no 'Registered thread' messages found in log).", + ) + thread_bp_addr = registered_matches[0] + + # Verify all threads completed their step-over. + completed_count = lldb_log.count("Completed step over breakpoint plan.") + self.assertGreaterEqual( + completed_count, + num_threads, + "Expected at least {} 'Completed step over breakpoint plan.' " + "messages (one per thread), but got {}.".format( + num_threads, completed_count + ), + ) + + # Count z0/Z0 packets for the thread breakpoint address. + # z0 = remove (disable) software breakpoint. + # Z0 = set (enable) software breakpoint. + # Strip the "0x" prefix and leading zeros to match the GDB packet + # format (which uses lowercase hex without "0x" prefix). 
+ bp_addr_hex = thread_bp_addr[2:].lstrip("0") if thread_bp_addr else "" + + z0_count = 0 # disable packets + Z0_count = 0 # enable packets + initial_Z0_seen = False + max_vcont_step_threads = 0 # largest number of s: actions in one vCont + + self.assertTrue(os.path.isfile(gdb_logfile), "gdb-remote log file not found") + with open(gdb_logfile, "r") as f: + for line in f: + if "send packet: $" not in line: + continue + + # Match z0,<addr> (disable) or Z0,<addr> (enable). + m = re.search(r"send packet: \$([Zz])0,([0-9a-fA-F]+),", line) + if m and m.group(2) == bp_addr_hex: + if m.group(1) == "Z": + if not initial_Z0_seen: + initial_Z0_seen = True + else: + Z0_count += 1 + else: + z0_count += 1 + + # Count step actions in vCont packets to detect batching. + # A batched vCont looks like: vCont;s:tid1;s:tid2;... + vcont_m = re.search(r"send packet: \$vCont((?:;[^#]+)*)", line) + if vcont_m: + actions = vcont_m.group(1) + step_count = len(re.findall(r";s:", actions)) + if step_count > max_vcont_step_threads: + max_vcont_step_threads = step_count + + # With the optimization, fewer breakpoint toggles should occur. + # Without optimization we'd see num_threads z0 and num_threads Z0. + # With batching, even partial, we expect fewer toggles. + self.assertLess( + z0_count, + num_threads, + "Expected fewer than {} breakpoint disables (z0) due to " + "batching, but got {}.".format(num_threads, z0_count), + ) + self.assertLess( + Z0_count, + num_threads, + "Expected fewer than {} breakpoint re-enables (Z0) due to " + "batching, but got {}.".format(num_threads, Z0_count), + ) + + # Verify at least one batched vCont packet contained multiple + # step actions, proving threads were stepped together. 
+ self.assertGreater( + max_vcont_step_threads, + 1, + "Expected at least one batched vCont packet with multiple " + "step actions (s:), but the maximum was {}.".format(max_vcont_step_threads), + ) _______________________________________________ lldb-commits mailing list [email protected] https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits
