jingham updated this revision to Diff 252982.
jingham marked 2 inline comments as done.
jingham added a comment.

Addressed most of Pavel's review comments.
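For anyone trying the patch out, the new user-facing surface is: "thread plan list" now accepts -t <tid> and -u (include plans for unreported threads), there is a new "thread plan prune [<tid> ...]" subcommand, and a new target.process.plugin-reports-all-threads setting. A minimal interactive sketch, using only commands and flags that appear in this diff (the TID is the one the test's OS plugin fabricates; output omitted, not a real session transcript):

  (lldb) settings set target.process.plugin-reports-all-threads false
  (lldb) thread plan list -u -t 0x111111111
  (lldb) thread plan prune 0x111111111
  (lldb) thread plan prune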


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D76814/new/

https://reviews.llvm.org/D76814

Files:
  lldb/include/lldb/Target/Process.h
  lldb/include/lldb/Target/Thread.h
  lldb/include/lldb/Target/ThreadPlan.h
  lldb/include/lldb/Target/ThreadPlanStack.h
  lldb/source/Commands/CommandObjectThread.cpp
  lldb/source/Commands/Options.td
  lldb/source/Target/Process.cpp
  lldb/source/Target/TargetProperties.td
  lldb/source/Target/Thread.cpp
  lldb/source/Target/ThreadList.cpp
  lldb/source/Target/ThreadPlan.cpp
  lldb/source/Target/ThreadPlanStack.cpp
  lldb/source/Target/ThreadPlanStepOut.cpp
  
lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/Makefile
  
lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/TestOSPluginStepping.py
  
lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/main.cpp
  
lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/operating_system.py
  lldb/test/API/functionalities/thread_plan/Makefile
  lldb/test/API/functionalities/thread_plan/TestThreadPlanCommands.py
  lldb/test/API/functionalities/thread_plan/main.c

Index: lldb/test/API/functionalities/thread_plan/main.c
===================================================================
--- /dev/null
+++ lldb/test/API/functionalities/thread_plan/main.c
@@ -0,0 +1,16 @@
+#include <stdio.h>
+
+void
+call_me(int value) {
+  printf("called with %d\n", value); // Set another here.
+}
+
+int
+main(int argc, char **argv)
+{
+  call_me(argc); // Set a breakpoint here.
+  printf("This just spaces the two calls\n");
+  call_me(argc); // Run here to step over again.
+  printf("More spacing\n");
+  return 0; // Make sure we get here on last continue
+}
Index: lldb/test/API/functionalities/thread_plan/TestThreadPlanCommands.py
===================================================================
--- /dev/null
+++ lldb/test/API/functionalities/thread_plan/TestThreadPlanCommands.py
@@ -0,0 +1,160 @@
+"""
+Test that thread plan listing and deleting work.
+"""
+
+
+
+import lldb
+import lldbsuite.test.lldbutil as lldbutil
+from lldbsuite.test.lldbtest import *
+
+
+class TestThreadPlanCommands(TestBase):
+
+    mydir = TestBase.compute_mydir(__file__)
+
+    NO_DEBUG_INFO_TESTCASE = True
+
+    def test_thread_plan_actions(self):
+        self.build()
+        self.main_source_file = lldb.SBFileSpec("main.c")
+        self.thread_plan_test()
+
+    def check_list_output(self, command, active_plans = [], completed_plans = [], discarded_plans = []):
+        # Check the "thread plan list" output against the lists of expected active, completed and discarded plans.
+        # If all three check arrays are empty, the command is expected to fail.
+
+        interp = self.dbg.GetCommandInterpreter()
+        result = lldb.SBCommandReturnObject()
+
+        num_active = len(active_plans)
+        num_completed = len(completed_plans)
+        num_discarded = len(discarded_plans)
+
+        interp.HandleCommand(command, result)
+        if num_active == 0 and num_completed == 0 and num_discarded == 0:
+            self.assertFalse(result.Succeeded(), "command: '%s' succeeded when it should have failed: '%s'"%
+                             (command, result.GetError()))
+            return
+
+        self.assertTrue(result.Succeeded(), "command: '%s' failed: '%s'"%(command, result.GetError()))
+        result_arr = result.GetOutput().splitlines()
+        num_results = len(result_arr)
+
+        # Match the expected number of elements.
+        # Adjust the count for the number of header lines we aren't matching:
+        fudge = 0
+        
+        if num_completed == 0 and num_discarded == 0:
+            # The fudge is 3: Thread header, Active Plan header and base plan
+            fudge = 3
+        elif num_completed == 0 or num_discarded == 0:
+            # The fudge is 4: The above plus either the Completed or Discarded Plan header:
+            fudge = 4
+        else:
+            # The fudge is 5 since we have both headers:
+            fudge = 5
+
+        self.assertEqual(num_results, num_active + num_completed + num_discarded + fudge,
+                             "Mismatch between the number of output lines and the expected plan entries")
+            
+        # Now iterate through the results array and pick out the results.
+        result_idx = 0
+        self.assertTrue("thread #" in result_arr[result_idx], "Found thread header") ; result_idx += 1
+        self.assertTrue("Active plan stack" in result_arr[result_idx], "Found active header") ; result_idx += 1
+        self.assertTrue("Element 0: Base thread plan" in result_arr[result_idx], "Found base plan") ; result_idx += 1
+
+        for text in active_plans:
+            self.assertFalse("Completed plan stack" in result_arr[result_idx], "Found Completed header too early.")
+            self.assertTrue(text in result_arr[result_idx], "Didn't find active plan: %s"%(text)) ; result_idx += 1
+
+        if len(completed_plans) > 0:
+            self.assertTrue("Completed plan stack:" in result_arr[result_idx], "Found completed plan stack header") ; result_idx += 1
+            for text in completed_plans:
+                self.assertTrue(text in result_arr[result_idx], "Didn't find completed plan: %s"%(text)) ; result_idx += 1
+
+        if len(discarded_plans) > 0:
+            self.assertTrue("Discarded plan stack:" in result_arr[result_idx], "Found discarded plan stack header") ; result_idx += 1
+            for text in discarded_plans:
+                self.assertTrue(text in result_arr[result_idx], "Didn't find completed plan: %s"%(text)) ; result_idx += 1
+
+
+    def thread_plan_test(self):
+        (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
+                                   "Set a breakpoint here", self.main_source_file)
+
+        # Now set a breakpoint in call_me and step over.  We should have
+        # two public thread plans
+        call_me_bkpt = target.BreakpointCreateBySourceRegex("Set another here", self.main_source_file)
+        self.assertTrue(call_me_bkpt.GetNumLocations() > 0, "Set the breakpoint successfully")
+        thread.StepOver()
+        threads = lldbutil.get_threads_stopped_at_breakpoint(process, call_me_bkpt)
+        self.assertEqual(len(threads), 1, "Hit my breakpoint while stepping over")
+
+        current_id = threads[0].GetIndexID()
+        current_tid = threads[0].GetThreadID()
+        # Run thread plan list without the -i flag:
+        command = "thread plan list %d"%(current_id)
+        self.check_list_output(command, ["Stepping over line main.c"], [])
+
+        # Run thread plan list with the -i flag:
+        command = "thread plan list -i %d"%(current_id)
+        self.check_list_output(command, ["Stepping over line main.c", "Stepping out from"])
+
+        # Run thread plan list providing TID, output should be the same:
+        command = "thread plan list -t %d"%(current_tid)
+        self.check_list_output(command, ["Stepping over line main.c"])
+
+        # Provide both index & tid, and make sure we only print once:
+        command = "thread plan list -t %d %d"%(current_tid, current_id)
+        self.check_list_output(command, ["Stepping over line main.c"])
+
+        # Try a fake TID, and make sure that fails:
+        fake_tid = 0
+        for i in range(100, 10000, 100):
+            fake_tid = current_tid + i
+            thread = process.GetThreadByID(fake_tid)
+            if not thread:
+                break
+        
+        command = "thread plan list -t %d"%(fake_tid)
+        self.check_list_output(command)
+
+        # Now continue, and make sure we printed the completed plan:
+        process.Continue()
+        threads = lldbutil.get_stopped_threads(process, lldb.eStopReasonPlanComplete)
+        self.assertEqual(len(threads), 1, "One thread completed a step")
+        
+        # Run thread plan list - there aren't any private plans at this point:
+        command = "thread plan list %d"%(current_id)
+        self.check_list_output(command, [], ["Stepping over line main.c"])
+
+        # Set another breakpoint that we can run to, to try deleting thread plans.
+        second_step_bkpt = target.BreakpointCreateBySourceRegex("Run here to step over again",
+                                                                self.main_source_file)
+        self.assertTrue(second_step_bkpt.GetNumLocations() > 0, "Set the breakpoint successfully")
+        final_bkpt = target.BreakpointCreateBySourceRegex("Make sure we get here on last continue",
+                                                          self.main_source_file)
+        self.assertTrue(final_bkpt.GetNumLocations() > 0, "Set the breakpoint successfully")
+
+        threads = lldbutil.continue_to_breakpoint(process, second_step_bkpt)
+        self.assertEqual(len(threads), 1, "Hit the second step breakpoint")
+
+        threads[0].StepOver()
+        threads = lldbutil.get_threads_stopped_at_breakpoint(process, call_me_bkpt)
+
+        result = lldb.SBCommandReturnObject()
+        interp = self.dbg.GetCommandInterpreter()
+        interp.HandleCommand("thread plan discard 1", result)
+        self.assertTrue(result.Succeeded(), "Deleted the step over plan: %s"%(result.GetOutput()))
+
+        # Make sure the plan gets listed in the discarded plans:
+        command = "thread plan list %d"%(current_id)
+        self.check_list_output(command, [], [], ["Stepping over line main.c:"])
+
+        process.Continue()
+        threads = lldbutil.get_threads_stopped_at_breakpoint(process, final_bkpt)
+        self.assertEqual(len(threads), 1, "Ran to final breakpoint")
+        threads = lldbutil.get_stopped_threads(process, lldb.eStopReasonPlanComplete)
+        self.assertEqual(len(threads), 0, "Did NOT complete the step over plan")
+
Index: lldb/test/API/functionalities/thread_plan/Makefile
===================================================================
--- /dev/null
+++ lldb/test/API/functionalities/thread_plan/Makefile
@@ -0,0 +1,4 @@
+C_SOURCES := main.c
+CFLAGS_EXTRAS := -std=c99
+
+include Makefile.rules
Index: lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/operating_system.py
===================================================================
--- /dev/null
+++ lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/operating_system.py
@@ -0,0 +1,62 @@
+#!/usr/bin/python
+
+import lldb
+import struct
+
+
+class OperatingSystemPlugIn(object):
+    """Class that provides a OS plugin that along with the particular code in main.cpp
+       emulates the following scenario:
+             a) We stop in an OS Plugin created thread - which should be thread index 1
+             b) We step-out from that thread
+             c) We hit a breakpoint in another thread, and DON'T produce the OS Plugin thread.
+             d) We continue, and when we hit the step out breakpoint, we again produce the same
+                OS Plugin thread.
+             main.cpp sets values into the global variable g_value, which we use to tell the OS
+             plugin whether to produce the OS plugin thread or not.
+             Since we are always producing an OS plugin thread with a backing thread, we don't
+             need to implement get_register_info or get_register_data.
+    """
+
+    def __init__(self, process):
+        '''Initialization needs a valid lldb.SBProcess object.
+
+        This plug-in will get created after a live process is valid and has stopped for the
+        first time.'''
+        print("Plugin initialized.")
+        self.process = None
+        self.start_stop_id = 0
+        self.g_value = lldb.SBValue()
+        
+        if isinstance(process, lldb.SBProcess) and process.IsValid():
+            self.process = process
+            self.g_value = process.GetTarget().FindFirstGlobalVariable("g_value")
+            if not self.g_value.IsValid():
+                print("Could not find g_value")
+            
+    def create_thread(self, tid, context):
+        print("Called create thread with tid: ", tid)
+        return None
+
+    def get_thread_info(self):
+        g_value = self.g_value.GetValueAsUnsigned()
+        print("Called get_thread_info: g_value: %d"%(g_value))
+        if g_value == 0 or g_value == 2:
+            return [{'tid': 0x111111111,
+                             'name': 'one',
+                             'queue': 'queue1',
+                             'state': 'stopped',
+                             'stop_reason': 'breakpoint',
+                             'core' : 1 }]
+        else:
+            return []
+
+    def get_register_info(self):
+        print ("called get_register_info")
+        return None
+
+    
+    def get_register_data(self, tid):
+        print("Get register data called for tid: %d"%(tid))
+        return None
+
Index: lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/main.cpp
===================================================================
--- /dev/null
+++ lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/main.cpp
@@ -0,0 +1,51 @@
+// This test will present lldb with two threads, one of which the test will
+// overlay with an OSPlugin thread.  Then we'll do a step out on thread_1,
+// but arrange to hit a breakpoint in main before the step out completes.  At
+// that point we will not report an OS plugin thread for thread_1.  Then we'll
+// run again and hit the step-out breakpoint.  Make sure we haven't deleted
+// the step-out plan, and that we recognize the stop.
+
+#include <condition_variable>
+#include <mutex>
+#include <stdio.h>
+#include <thread>
+
+static int g_value = 0; // I don't have access to the real threads in the
+                        // OS Plugin, and I don't want to have to count
+                        // StopID's. So I'm using this value to tell me which
+                        // stop point the program has reached.
+std::mutex g_mutex;
+std::condition_variable g_cv;
+
+void step_out_of_here() {
+  std::unique_lock<std::mutex> func_lock(g_mutex);
+  // Set a breakpoint: first stop in thread - do a step out.
+  g_cv.notify_one();
+  g_cv.wait(func_lock, [&] { return g_value == 2; });
+}
+
+void *thread_func() {
+  // Do something
+  step_out_of_here();
+
+  // Return
+  return NULL;
+}
+
+int main() {
+  // Lock the mutex so we can block the thread:
+  std::unique_lock<std::mutex> main_lock(g_mutex);
+  // Create the thread
+  std::thread thread_1(thread_func);
+  g_cv.wait(main_lock);
+  g_value = 1;
+  // Stop here and do not make a memory thread for thread_1.
+  g_cv.notify_one();
+  g_value = 2;
+  main_lock.unlock();
+
+  // Wait for the threads to finish
+  thread_1.join();
+
+  return 0;
+}
Index: lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/TestOSPluginStepping.py
===================================================================
--- /dev/null
+++ lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/TestOSPluginStepping.py
@@ -0,0 +1,113 @@
+"""
+Test that stepping works even when the OS Plugin doesn't report
+all threads at every stop.
+"""
+
+from __future__ import print_function
+
+
+import os
+import lldb
+from lldbsuite.test.lldbtest import *
+import lldbsuite.test.lldbutil as lldbutil
+
+
+class TestOSPluginStepping(TestBase):
+
+    mydir = TestBase.compute_mydir(__file__)
+    NO_DEBUG_INFO_TESTCASE = True
+
+    def test_python_os_plugin(self):
+        """Test that stepping works when the OS Plugin doesn't report all
+           threads at every stop"""
+        self.build()
+        self.main_file = lldb.SBFileSpec('main.cpp')
+        self.run_python_os_step_missing_thread(False)
+
+    def test_python_os_plugin_prune(self):
+        """Test that pruning the unreported PlanStacks works"""
+        self.build()
+        self.main_file = lldb.SBFileSpec('main.cpp')
+        self.run_python_os_step_missing_thread(True)
+
+    def get_os_thread(self):
+        return self.process.GetThreadByID(0x111111111)
+
+    def is_os_thread(self, thread):
+        id = thread.GetID()
+        return id == 0x111111111
+    
+    def run_python_os_step_missing_thread(self, do_prune):
+        """Test that the Python operating system plugin works correctly"""
+
+        # Our OS plugin does NOT report all threads:
+        result = self.dbg.HandleCommand("settings set target.process.plugin-reports-all-threads false")
+
+        python_os_plugin_path = os.path.join(self.getSourceDir(),
+                                             "operating_system.py")
+        (target, self.process, thread, thread_bkpt) = lldbutil.run_to_source_breakpoint(
+            self, "first stop in thread - do a step out", self.main_file)
+
+        main_bkpt = target.BreakpointCreateBySourceRegex('Stop here and do not make a memory thread for thread_1',
+                                                         self.main_file)
+        self.assertEqual(main_bkpt.GetNumLocations(), 1, "Main breakpoint has one location")
+
+        # There should not be an os thread before we load the plugin:
+        self.assertFalse(self.get_os_thread().IsValid(), "No OS thread before loading plugin")
+        
+        # Now load the python OS plug-in which should update the thread list and we should have
+        # an OS plug-in thread overlaying thread_1 with id 0x111111111
+        command = "settings set target.process.python-os-plugin-path '%s'" % python_os_plugin_path
+        self.dbg.HandleCommand(command)
+
+        # Verify our OS plug-in threads showed up
+        os_thread = self.get_os_thread()
+        self.assertTrue(
+            os_thread.IsValid(),
+            "Make sure we added the thread 0x111111111 after we load the python OS plug-in")
+        
+        # Now we are going to step-out.  This should get interrupted by main_bkpt.  We've
+        # set up the OS plugin so at this stop, we have lost the OS thread 0x111111111.
+        # Make sure both of these are true:
+        os_thread.StepOut()
+        
+        stopped_threads = lldbutil.get_threads_stopped_at_breakpoint(self.process, main_bkpt)
+        self.assertEqual(len(stopped_threads), 1, "Stopped at main_bkpt")
+        thread = self.process.GetThreadByID(0x111111111)
+        self.assertFalse(thread.IsValid(), "No thread 0x111111111 on second stop.")
+        
+        # Make sure we still have the thread plans for this thread:
+        # First, don't show unreported threads, that should fail:
+        command = "thread plan list -t 0x111111111"
+        result = lldb.SBCommandReturnObject()
+        interp = self.dbg.GetCommandInterpreter() 
+        interp.HandleCommand(command, result)
+        self.assertFalse(result.Succeeded(), "We found no plans for the unreported thread.")
+        # Now do it again but with the -u flag:
+        command = "thread plan list -u -t 0x111111111"
+        result = lldb.SBCommandReturnObject()
+        interp.HandleCommand(command, result)
+        self.assertTrue(result.Succeeded(), "We found plans for the unreported thread.")
+        
+        if do_prune:
+            # Prune the thread plan and continue, and we will run to exit.
+            interp.HandleCommand("thread plan prune 0x111111111", result)
+            self.assertTrue(result.Succeeded(), "Found the plan for 0x111111111 and pruned it")
+
+            # List again, make sure it doesn't work:
+            command = "thread plan list -u -t 0x111111111"
+            interp.HandleCommand(command, result)
+            self.assertFalse(result.Succeeded(), "We still found plans for the unreported thread.")
+            
+            self.process.Continue()
+            self.assertEqual(self.process.GetState(), lldb.eStateExited, "We exited.")
+        else:
+            # Now we are going to continue, and when we hit the step-out breakpoint, we will
+            # put the OS plugin thread back, lldb will recover its ThreadPlanStack, and
+            # we will stop with a "step-out" reason.
+            self.process.Continue()
+            os_thread = self.get_os_thread()
+            self.assertTrue(os_thread.IsValid(), "The OS thread is back after continue")
+            self.assertTrue("step out" in os_thread.GetStopDescription(100), "Completed step out plan")
+        
+        
Index: lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/Makefile
===================================================================
--- /dev/null
+++ lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/Makefile
@@ -0,0 +1,4 @@
+CXX_SOURCES := main.cpp
+ENABLE_THREADS := YES
+
+include Makefile.rules
Index: lldb/source/Target/ThreadPlanStepOut.cpp
===================================================================
--- lldb/source/Target/ThreadPlanStepOut.cpp
+++ lldb/source/Target/ThreadPlanStepOut.cpp
@@ -188,7 +188,7 @@
 
 ThreadPlanStepOut::~ThreadPlanStepOut() {
   if (m_return_bp_id != LLDB_INVALID_BREAK_ID)
-    GetThread().CalculateTarget()->RemoveBreakpointByID(m_return_bp_id);
+    GetTarget().RemoveBreakpointByID(m_return_bp_id);
 }
 
 void ThreadPlanStepOut::GetDescription(Stream *s,
@@ -204,7 +204,7 @@
       s->Printf("Stepping out from ");
       Address tmp_address;
       if (tmp_address.SetLoadAddress(m_step_from_insn, &GetTarget())) {
-        tmp_address.Dump(s, &GetThread(), Address::DumpStyleResolvedDescription,
+        tmp_address.Dump(s, &m_process, Address::DumpStyleResolvedDescription,
                          Address::DumpStyleLoadAddress);
       } else {
         s->Printf("address 0x%" PRIx64 "", (uint64_t)m_step_from_insn);
@@ -216,7 +216,7 @@
 
       s->Printf(" returning to frame at ");
       if (tmp_address.SetLoadAddress(m_return_addr, &GetTarget())) {
-        tmp_address.Dump(s, &GetThread(), Address::DumpStyleResolvedDescription,
+        tmp_address.Dump(s, &m_process, Address::DumpStyleResolvedDescription,
                          Address::DumpStyleLoadAddress);
       } else {
         s->Printf("address 0x%" PRIx64 "", (uint64_t)m_return_addr);
@@ -227,6 +227,9 @@
     }
   }
 
+  if (m_stepped_past_frames.empty())
+    return;
+
   s->Printf("\n");
   for (StackFrameSP frame_sp : m_stepped_past_frames) {
     s->Printf("Stepped out past: ");
Index: lldb/source/Target/ThreadPlanStack.cpp
===================================================================
--- lldb/source/Target/ThreadPlanStack.cpp
+++ lldb/source/Target/ThreadPlanStack.cpp
@@ -16,48 +16,70 @@
 using namespace lldb;
 using namespace lldb_private;
 
-static void PrintPlanElement(Stream *s, const ThreadPlanSP &plan,
+static void PrintPlanElement(Stream &s, const ThreadPlanSP &plan,
                              lldb::DescriptionLevel desc_level,
                              int32_t elem_idx) {
-  s->IndentMore();
-  s->Indent();
-  s->Printf("Element %d: ", elem_idx);
-  plan->GetDescription(s, desc_level);
-  s->EOL();
-  s->IndentLess();
+  s.IndentMore();
+  s.Indent();
+  s.Printf("Element %d: ", elem_idx);
+  plan->GetDescription(&s, desc_level);
+  s.EOL();
+  s.IndentLess();
 }
 
-void ThreadPlanStack::DumpThreadPlans(Stream *s,
+ThreadPlanStack::ThreadPlanStack(const Thread &thread, bool make_null) {
+  if (make_null) {
+    // The ThreadPlanNull doesn't do anything to the Thread, so this is actually
+    // still a const operation.
+    m_plans.push_back(
+        ThreadPlanSP(new ThreadPlanNull(const_cast<Thread &>(thread))));
+  }
+}
+
+void ThreadPlanStack::DumpThreadPlans(Stream &s,
                                       lldb::DescriptionLevel desc_level,
                                       bool include_internal) const {
 
   uint32_t stack_size;
 
-  s->IndentMore();
-  s->Indent();
-  s->Printf("Active plan stack:\n");
-  int32_t print_idx = 0;
-  for (auto plan : m_plans) {
-    PrintPlanElement(s, plan, desc_level, print_idx++);
-  }
+  s.IndentMore();
+  PrintOneStack(s, "Active plan stack", m_plans, desc_level, include_internal);
+  PrintOneStack(s, "Completed plan stack", m_completed_plans, desc_level,
+                include_internal);
+  PrintOneStack(s, "Discarded plan stack", m_discarded_plans, desc_level,
+                include_internal);
+  s.IndentLess();
+}
 
-  if (AnyCompletedPlans()) {
-    print_idx = 0;
-    s->Indent();
-    s->Printf("Completed Plan Stack:\n");
-    for (auto plan : m_completed_plans)
-      PrintPlanElement(s, plan, desc_level, print_idx++);
+void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name,
+                                    const PlanStack &stack,
+                                    lldb::DescriptionLevel desc_level,
+                                    bool include_internal) const {
+  // If the stack is empty, just exit:
+  if (stack.empty())
+    return;
+
+  // If we are only dumping public plans, make sure this stack has at least one:
+  bool any_public = false;
+  if (!include_internal) {
+    for (auto plan : stack) {
+      if (!plan->GetPrivate()) {
+        any_public = true;
+        break;
+      }
+    }
   }
 
-  if (AnyDiscardedPlans()) {
-    print_idx = 0;
-    s->Indent();
-    s->Printf("Discarded Plan Stack:\n");
-    for (auto plan : m_discarded_plans)
+  if (include_internal || any_public) {
+    int print_idx = 0;
+    s.Indent();
+    s.Printf("%s:\n", stack_name);
+    for (auto plan : stack) {
+      if (!include_internal && plan->GetPrivate())
+        continue;
       PrintPlanElement(s, plan, desc_level, print_idx++);
+    }
   }
-
-  s->IndentLess();
 }
 
 size_t ThreadPlanStack::CheckpointCompletedPlans() {
@@ -368,3 +390,123 @@
   }
   llvm_unreachable("Invalid StackKind value");
 }
+
+void ThreadPlanStackMap::Update(ThreadList &current_threads,
+                                bool delete_missing,
+                                bool check_for_new) {
+
+  // Now find all the new threads and add them to the map:
+  if (check_for_new) {
+    for (auto thread : current_threads.Threads()) {
+      lldb::tid_t cur_tid = thread->GetID();
+      if (!Find(cur_tid)) {
+        AddThread(*thread.get());
+        thread->QueueFundamentalPlan(true);
+      }
+    }
+  }
+
+  // If we aren't reaping missing threads at this point,
+  // we are done.
+  if (!delete_missing)
+    return;
+  // Otherwise, scan for TIDs that are no longer present in current_threads,
+  // and remove their plan stacks:
+  std::vector<lldb::tid_t> missing_threads;
+
+  for (auto thread_plans : m_plans_list) {
+    lldb::tid_t cur_tid = thread_plans.first;
+    ThreadSP thread_sp = current_threads.FindThreadByID(cur_tid);
+    if (!thread_sp)
+      missing_threads.push_back(cur_tid);
+  }
+  for (lldb::tid_t tid : missing_threads) {
+    RemoveTID(tid);
+  }
+}
+
+void ThreadPlanStackMap::DumpPlans(Stream &strm,
+                                   lldb::DescriptionLevel desc_level,
+                                   bool internal, bool condense_if_trivial,
+                                   bool skip_unreported) {
+  for (auto elem : m_plans_list) {
+    lldb::tid_t tid = elem.first;
+    uint32_t index_id = 0;
+    ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);
+
+    if (skip_unreported) {
+      if (!thread_sp)
+        continue;
+    }
+    if (thread_sp)
+      index_id = thread_sp->GetIndexID();
+
+    if (condense_if_trivial) {
+      if (!elem.second.AnyPlans() && !elem.second.AnyCompletedPlans() &&
+          !elem.second.AnyDiscardedPlans()) {
+        strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 "\n", index_id, tid);
+        strm.IndentMore();
+        strm.Indent();
+        strm.Printf("No active thread plans\n");
+        strm.IndentLess();
+        continue;
+      }
+    }
+
+    strm.Indent();
+    strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 ":\n", index_id, tid);
+
+    elem.second.DumpThreadPlans(strm, desc_level, internal);
+  }
+}
+
+bool ThreadPlanStackMap::DumpPlansForTID(Stream &strm, lldb::tid_t tid,
+                                         lldb::DescriptionLevel desc_level,
+                                         bool internal,
+                                         bool condense_if_trivial,
+                                         bool skip_unreported) {
+  uint32_t index_id = 0;
+  ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);
+
+  if (skip_unreported) {
+    if (!thread_sp) {
+      strm.Format("Unknown TID: {0}", tid);
+      return false;
+    }
+  }
+
+  if (thread_sp)
+    index_id = thread_sp->GetIndexID();
+  ThreadPlanStack *stack = Find(tid);
+  if (!stack) {
+    strm.Format("Unknown TID: {0}\n", tid);
+    return false;
+  }
+
+  if (condense_if_trivial) {
+    if (!stack->AnyPlans() && !stack->AnyCompletedPlans() &&
+        !stack->AnyDiscardedPlans()) {
+      strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 "\n", index_id, tid);
+      strm.IndentMore();
+      strm.Indent();
+      strm.Printf("No active thread plans\n");
+      strm.IndentLess();
+      return true;
+    }
+  }
+
+  strm.Indent();
+  strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 ":\n", index_id, tid);
+
+  stack->DumpThreadPlans(strm, desc_level, internal);
+  return true;
+}
+
+bool ThreadPlanStackMap::PrunePlansForTID(lldb::tid_t tid) {
+  // We only remove the plans for unreported TID's.
+  ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);
+  if (thread_sp)
+    return false;
+
+  return RemoveTID(tid);
+}
Index: lldb/source/Target/ThreadPlan.cpp
===================================================================
--- lldb/source/Target/ThreadPlan.cpp
+++ lldb/source/Target/ThreadPlan.cpp
@@ -21,7 +21,7 @@
 // ThreadPlan constructor
 ThreadPlan::ThreadPlan(ThreadPlanKind kind, const char *name, Thread &thread,
                        Vote stop_vote, Vote run_vote)
-    : m_process(*thread.GetProcess().get()), m_tid(thread.GetID()), 
+    : m_process(*thread.GetProcess().get()), m_tid(thread.GetID()),
       m_stop_vote(stop_vote), m_run_vote(run_vote),
       m_takes_iteration_count(false), m_could_not_resolve_hw_bp(false),
       m_kind(kind), m_thread(&thread), m_name(name), m_plan_complete_mutex(),
@@ -41,7 +41,7 @@
 Thread &ThreadPlan::GetThread() {
   if (m_thread)
     return *m_thread;
-    
+
   ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(m_tid);
   m_thread = thread_sp.get();
   return *m_thread;
@@ -127,13 +127,17 @@
           "%s Thread #%u (0x%p): tid = 0x%4.4" PRIx64 ", pc = 0x%8.8" PRIx64
           ", sp = 0x%8.8" PRIx64 ", fp = 0x%8.8" PRIx64 ", "
           "plan = '%s', state = %s, stop others = %d",
-          __FUNCTION__, GetThread().GetIndexID(), 
+          __FUNCTION__, GetThread().GetIndexID(),
           static_cast<void *>(&GetThread()), m_tid, static_cast<uint64_t>(pc),
           static_cast<uint64_t>(sp), static_cast<uint64_t>(fp), m_name.c_str(),
           StateAsCString(resume_state), StopOthers());
     }
   }
-  return DoWillResume(resume_state, current_plan);
+  bool success = DoWillResume(resume_state, current_plan);
+  m_thread = nullptr; // We don't cache the thread pointer over resumes.  This
+                      // Thread might go away, and another Thread might represent
+                      // the same underlying object on a later stop.
+  return success;
 }
 
 lldb::user_id_t ThreadPlan::GetNextID() {
Index: lldb/source/Target/ThreadList.cpp
===================================================================
--- lldb/source/Target/ThreadList.cpp
+++ lldb/source/Target/ThreadList.cpp
@@ -715,6 +715,11 @@
     // to work around the issue
     collection::iterator rhs_pos, rhs_end = rhs.m_threads.end();
     for (rhs_pos = rhs.m_threads.begin(); rhs_pos != rhs_end; ++rhs_pos) {
+      // If this thread has already been destroyed, we don't need to look for
+      // it to destroy it again.
+      if (!(*rhs_pos)->IsValid())
+        continue;
+
       const lldb::tid_t tid = (*rhs_pos)->GetID();
       bool thread_is_alive = false;
       const uint32_t num_threads = m_threads.size();
@@ -728,7 +733,6 @@
       }
       if (!thread_is_alive) {
         (*rhs_pos)->DestroyThread();
-        m_process->RemoveThreadPlansForTID((*rhs_pos)->GetID());
       }
     }
   }
Index: lldb/source/Target/Thread.cpp
===================================================================
--- lldb/source/Target/Thread.cpp
+++ lldb/source/Target/Thread.cpp
@@ -240,10 +240,6 @@
             static_cast<void *>(this), GetID());
 
   CheckInWithManager();
-  
-  process.AddThreadPlansForThread(*this);
-  
-  QueueFundamentalPlan(true);
 }
 
 Thread::~Thread() {
@@ -781,7 +777,9 @@
     LLDB_LOGF(log, "^^^^^^^^ Thread::ShouldStop Begin ^^^^^^^^");
     StreamString s;
     s.IndentMore();
-    DumpThreadPlans(&s);
+    GetProcess()->DumpThreadPlansForTID(
+        s, GetID(), eDescriptionLevelVerbose, true /* internal */,
+        false /* condense_trivial */, true /* skip_unreported */);
     LLDB_LOGF(log, "Plan stack initial state:\n%s", s.GetData());
   }
 
@@ -945,7 +943,9 @@
   if (log) {
     StreamString s;
     s.IndentMore();
-    DumpThreadPlans(&s);
+    GetProcess()->DumpThreadPlansForTID(
+        s, GetID(), eDescriptionLevelVerbose, true /* internal */,
+        false /* condense_trivial */, true /* skip_unreported */);
     LLDB_LOGF(log, "Plan stack final state:\n%s", s.GetData());
     LLDB_LOGF(log, "vvvvvvvv Thread::ShouldStop End (returning %i) vvvvvvvv",
               should_stop);
@@ -1051,8 +1051,18 @@
 
 ThreadPlanStack &Thread::GetPlans() const {
   ThreadPlanStack *plans = GetProcess()->FindThreadPlans(GetID());
-  assert(plans && "Can't have a thread with no plans");
-  return *plans;
+  if (plans)
+    return *plans;
+
+  // History threads don't have a thread plan, but they do get asked to
+  // describe themselves, which usually involves pulling out the stop reason.
+  // That in turn will check for a completed plan on the ThreadPlanStack.
+  // Instead of special-casing at that point, we return a Stack with a
+  // ThreadPlanNull as its base plan.  That will give the right answers to the
+  // queries GetDescription makes, and only assert if you try to run the thread.
+  if (!m_null_plan_stack_up)
+    m_null_plan_stack_up.reset(new ThreadPlanStack(*this, true));
+  return *(m_null_plan_stack_up.get());
 }
 
 void Thread::PushPlan(ThreadPlanSP thread_plan_sp) {
@@ -1372,26 +1382,6 @@
 
 uint32_t Thread::GetIndexID() const { return m_index_id; }
 
-void Thread::DumpThreadPlans(Stream *s, lldb::DescriptionLevel desc_level,
-                             bool include_internal,
-                             bool ignore_boring_threads) const {
-  if (ignore_boring_threads) {
-    if (!GetPlans().AnyPlans() && !GetPlans().AnyCompletedPlans()
-        && !GetPlans().AnyDiscardedPlans()) {
-      s->Printf("thread #%u: tid = 0x%4.4" PRIx64 "\n", GetIndexID(), GetID());
-      s->IndentMore();
-      s->Indent();
-      s->Printf("No active thread plans\n");
-      s->IndentLess();
-      return;
-    }
-  }
-  
-  s->Indent();
-  s->Printf("thread #%u: tid = 0x%4.4" PRIx64 ":\n", GetIndexID(), GetID());
-  GetPlans().DumpThreadPlans(s, desc_level, include_internal);
-}
-
 TargetSP Thread::CalculateTarget() {
   TargetSP target_sp;
   ProcessSP process_sp(GetProcess());
Index: lldb/source/Target/TargetProperties.td
===================================================================
--- lldb/source/Target/TargetProperties.td
+++ lldb/source/Target/TargetProperties.td
@@ -180,6 +180,10 @@
   def PythonOSPluginPath: Property<"python-os-plugin-path", "FileSpec">,
     DefaultUnsignedValue<1>,
     Desc<"A path to a python OS plug-in module file that contains a OperatingSystemPlugIn class.">;
+  def PluginReportsAllThreads: Property<"plugin-reports-all-threads", "Boolean">,
+    Global,
+    DefaultTrue,
+    Desc<"Set to False if your OS Plugins doesn't report all threads on each stop.">;
   def StopOnSharedLibraryEvents: Property<"stop-on-sharedlibrary-events", "Boolean">,
     Global,
     DefaultFalse,
Index: lldb/source/Target/Process.cpp
===================================================================
--- lldb/source/Target/Process.cpp
+++ lldb/source/Target/Process.cpp
@@ -179,6 +179,17 @@
   m_collection_sp->SetPropertyAtIndexAsFileSpec(nullptr, idx, file);
 }
 
+bool ProcessProperties::GetPluginReportsAllThreads() const {
+  const uint32_t idx = ePropertyPluginReportsAllThreads;
+  return m_collection_sp->GetPropertyAtIndexAsBoolean(
+      nullptr, idx, g_process_properties[idx].default_uint_value != 0);
+}
+
+void ProcessProperties::SetPluginReportsAllThreads(bool does_report) {
+  const uint32_t idx = ePropertyPluginReportsAllThreads;
+  m_collection_sp->SetPropertyAtIndexAsBoolean(nullptr, idx, does_report);
+}
+
 bool ProcessProperties::GetIgnoreBreakpointsInExpressions() const {
   const uint32_t idx = ePropertyIgnoreBreakpointsInExpressions;
   return m_collection_sp->GetPropertyAtIndexAsBoolean(
@@ -478,7 +489,7 @@
       m_mod_id(), m_process_unique_id(0), m_thread_index_id(0),
       m_thread_id_to_index_id_map(), m_exit_status(-1), m_exit_string(),
       m_exit_status_mutex(), m_thread_mutex(), m_thread_list_real(this),
-      m_thread_list(this), m_extended_thread_list(this),
+      m_thread_list(this), m_thread_plans(*this), m_extended_thread_list(this),
       m_extended_thread_stop_id(0), m_queue_list(this), m_queue_list_stop_id(0),
       m_notifications(), m_image_tokens(), m_listener_sp(listener_sp),
       m_breakpoint_site_list(), m_dynamic_checkers_up(),
@@ -1184,9 +1195,12 @@
   const uint32_t stop_id = GetStopID();
   if (m_thread_list.GetSize(false) == 0 ||
       stop_id != m_thread_list.GetStopID()) {
+    bool clear_unused_threads = true;
     const StateType state = GetPrivateState();
     if (StateIsStoppedState(state, true)) {
       std::lock_guard<std::recursive_mutex> guard(m_thread_list.GetMutex());
+      m_thread_list.SetStopID(stop_id);
+
       // m_thread_list does have its own mutex, but we need to hold onto the
       // mutex between the call to UpdateThreadList(...) and the
       // os->UpdateThreadList(...) so it doesn't change on us
@@ -1207,6 +1221,10 @@
           size_t num_old_threads = old_thread_list.GetSize(false);
           for (size_t i = 0; i < num_old_threads; ++i)
             old_thread_list.GetThreadAtIndex(i, false)->ClearBackingThread();
+          // See if the OS plugin reports all threads.  If it does, then
+          // it is safe to clear unseen threads' plans here.  Otherwise we
+          // should preserve them in case they show up again:
+          clear_unused_threads = GetPluginReportsAllThreads();
 
           // Turn off dynamic types to ensure we don't run any expressions.
           // Objective-C can run an expression to determine if a SBValue is a
@@ -1233,7 +1251,7 @@
             target.SetPreferDynamicValue(saved_prefer_dynamic);
         } else {
           // No OS plug-in, the new thread list is the same as the real thread
-          // list
+          // list.
           new_thread_list = real_thread_list;
         }
 
@@ -1250,6 +1268,12 @@
           m_queue_list_stop_id = GetLastNaturalStopID();
         }
       }
+      // Now update the plan stack map.
+      // If we do have an OS plugin, any absent real threads in the
+      // m_thread_list have already been removed from the ThreadPlanStackMap.
+      // So any remaining threads are OS Plugin threads, and those we want to
+      // preserve in case they show up again.
+      m_thread_plans.Update(m_thread_list, clear_unused_threads);
     }
   }
 }
@@ -1258,14 +1282,26 @@
   return m_thread_plans.Find(tid);
 }
 
-void Process::AddThreadPlansForThread(Thread &thread) {
-  if (m_thread_plans.Find(thread.GetID()))
-    return;
-  m_thread_plans.AddThread(thread);
+bool Process::PruneThreadPlansForTID(lldb::tid_t tid) {
+  return m_thread_plans.PrunePlansForTID(tid);
 }
 
-void Process::RemoveThreadPlansForTID(lldb::tid_t tid) {
-  m_thread_plans.RemoveTID(tid);
+void Process::PruneThreadPlans() {
+  m_thread_plans.Update(GetThreadList(), true, false);
+}
+
+bool Process::DumpThreadPlansForTID(Stream &strm, lldb::tid_t tid,
+                                    lldb::DescriptionLevel desc_level,
+                                    bool internal, bool condense_trivial,
+                                    bool skip_unreported_plans) {
+  return m_thread_plans.DumpPlansForTID(
+      strm, tid, desc_level, internal, condense_trivial, skip_unreported_plans);
+}
+void Process::DumpThreadPlans(Stream &strm, lldb::DescriptionLevel desc_level,
+                              bool internal, bool condense_trivial,
+                              bool skip_unreported_plans) {
+  m_thread_plans.DumpPlans(strm, desc_level, internal, condense_trivial,
+                           skip_unreported_plans);
 }
 
 void Process::UpdateQueueListIfNeeded() {
Index: lldb/source/Commands/Options.td
===================================================================
--- lldb/source/Commands/Options.td
+++ lldb/source/Commands/Options.td
@@ -969,6 +969,11 @@
     Desc<"Display more information about the thread plans">;
   def thread_plan_list_internal : Option<"internal", "i">, Group<1>,
     Desc<"Display internal as well as user thread plans">;
+  def thread_plan_list_thread_id : Option<"thread-id", "t">, Group<1>,
+    Arg<"ThreadID">, Desc<"List the thread plans for this TID, can be "
+    "specified more than once.">;
+  def thread_plan_list_unreported : Option<"unreported", "u">, Group<1>,
+    Desc<"Display thread plans for unreported threads">;
 }
 
 let Command = "type summary add" in {
Index: lldb/source/Commands/CommandObjectThread.cpp
===================================================================
--- lldb/source/Commands/CommandObjectThread.cpp
+++ lldb/source/Commands/CommandObjectThread.cpp
@@ -1833,25 +1833,36 @@
 
     Status SetOptionValue(uint32_t option_idx, llvm::StringRef option_arg,
                           ExecutionContext *execution_context) override {
-      Status error;
       const int short_option = m_getopt_table[option_idx].val;
 
       switch (short_option) {
       case 'i':
         m_internal = true;
         break;
+      case 't':
+        lldb::tid_t tid;
+        if (option_arg.getAsInteger(0, tid))
+          return Status("invalid tid: '%s'.", option_arg.str().c_str());
+        m_tids.push_back(tid);
+        break;
+      case 'u':
+        m_unreported = false;
+        break;
       case 'v':
         m_verbose = true;
         break;
       default:
         llvm_unreachable("Unimplemented option");
       }
-      return error;
+      return {};
     }
 
     void OptionParsingStarting(ExecutionContext *execution_context) override {
       m_verbose = false;
       m_internal = false;
+      m_unreported = true; // The variable is "skip unreported" and we want to
+                           // skip unreported by default.
+      m_tids.clear();
     }
 
     llvm::ArrayRef<OptionDefinition> GetDefinitions() override {
@@ -1861,6 +1872,8 @@
     // Instance variables to hold the values for command options.
     bool m_verbose;
     bool m_internal;
+    bool m_unreported;
+    std::vector<lldb::tid_t> m_tids;
   };
 
   CommandObjectThreadPlanList(CommandInterpreter &interpreter)
@@ -1879,25 +1892,59 @@
 
   Options *GetOptions() override { return &m_options; }
 
+  bool DoExecute(Args &command, CommandReturnObject &result) override {
+    // If we are reporting all threads, dispatch to the Process to do that:
+    if (command.GetArgumentCount() == 0 && m_options.m_tids.empty()) {
+      Stream &strm = result.GetOutputStream();
+      DescriptionLevel desc_level = m_options.m_verbose
+                                        ? eDescriptionLevelVerbose
+                                        : eDescriptionLevelFull;
+      m_exe_ctx.GetProcessPtr()->DumpThreadPlans(
+          strm, desc_level, m_options.m_internal, true, m_options.m_unreported);
+      result.SetStatus(eReturnStatusSuccessFinishResult);
+      return true;
+    } else {
+      // Handle any TIDs the user specified with the -t option first, then
+      // fall through to the thread index arguments...
+      if (!m_options.m_tids.empty()) {
+        Process *process = m_exe_ctx.GetProcessPtr();
+        StreamString tmp_strm;
+        for (lldb::tid_t tid : m_options.m_tids) {
+          bool success = process->DumpThreadPlansForTID(
+              tmp_strm, tid, eDescriptionLevelFull, m_options.m_internal,
+              true /* condense_trivial */, m_options.m_unreported);
+          // If we didn't find a TID, stop here and return an error.
+          if (!success) {
+            result.SetError("Error dumping plans:");
+            result.AppendError(tmp_strm.GetString());
+            result.SetStatus(eReturnStatusFailed);
+            return false;
+          }
+          // Otherwise, add our data to the output:
+          result.GetOutputStream() << tmp_strm.GetString();
+        }
+      }
+      return CommandObjectIterateOverThreads::DoExecute(command, result);
+    }
+  }
+
 protected:
   bool HandleOneThread(lldb::tid_t tid, CommandReturnObject &result) override {
-    ThreadSP thread_sp =
-        m_exe_ctx.GetProcessPtr()->GetThreadList().FindThreadByID(tid);
-    if (!thread_sp) {
-      result.AppendErrorWithFormat("thread no longer exists: 0x%" PRIx64 "\n",
-                                   tid);
-      result.SetStatus(eReturnStatusFailed);
-      return false;
-    }
+    // If we have already handled this from a -t option, skip it here.
+    if (std::find(m_options.m_tids.begin(), m_options.m_tids.end(), tid) !=
+        m_options.m_tids.end())
+      return true;
 
-    Thread *thread = thread_sp.get();
+    Process *process = m_exe_ctx.GetProcessPtr();
 
     Stream &strm = result.GetOutputStream();
     DescriptionLevel desc_level = eDescriptionLevelFull;
     if (m_options.m_verbose)
       desc_level = eDescriptionLevelVerbose;
 
-    thread->DumpThreadPlans(&strm, desc_level, m_options.m_internal, true);
+    process->DumpThreadPlansForTID(strm, tid, desc_level, m_options.m_internal,
+                                   true /* condense_trivial */,
+                                   m_options.m_unreported);
     return true;
   }
 
@@ -1974,6 +2021,75 @@
   }
 };
 
+class CommandObjectThreadPlanPrune : public CommandObjectParsed {
+public:
+  CommandObjectThreadPlanPrune(CommandInterpreter &interpreter)
+      : CommandObjectParsed(interpreter, "thread plan prune",
+                            "Removes any thread plans associated with "
+                            "currently unreported threads.  "
+                            "Specify one or more TID's to remove, or if no "
+                            "TID's are provides, remove threads for all "
+                            "unreported threads",
+                            nullptr,
+                            eCommandRequiresProcess |
+                                eCommandTryTargetAPILock |
+                                eCommandProcessMustBeLaunched |
+                                eCommandProcessMustBePaused) {
+    CommandArgumentEntry arg;
+    CommandArgumentData tid_arg;
+
+    // Define the first (and only) variant of this arg.
+    tid_arg.arg_type = eArgTypeThreadID;
+    tid_arg.arg_repetition = eArgRepeatStar;
+
+    // There is only one variant this argument could be; put it into the
+    // argument entry.
+    arg.push_back(tid_arg);
+
+    // Push the data for the first argument into the m_arguments vector.
+    m_arguments.push_back(arg);
+  }
+
+  ~CommandObjectThreadPlanPrune() override = default;
+
+  bool DoExecute(Args &args, CommandReturnObject &result) override {
+    Process *process = m_exe_ctx.GetProcessPtr();
+    
+    if (args.GetArgumentCount() == 0) {
+      process->PruneThreadPlans();
+      result.SetStatus(eReturnStatusSuccessFinishNoResult);
+      return true;  
+    }
+
+    bool success;
+    const size_t num_args = args.GetArgumentCount();
+
+    std::lock_guard<std::recursive_mutex> guard(
+        process->GetThreadList().GetMutex());
+
+    for (size_t i = 0; i < num_args; i++) {
+      bool success;
+
+      lldb::tid_t tid = StringConvert::ToUInt64(
+          args.GetArgumentAtIndex(i), 0, 0, &success);
+      if (!success) {
+        result.AppendErrorWithFormat("invalid thread specification: \"%s\"\n",
+                                     args.GetArgumentAtIndex(i));
+        result.SetStatus(eReturnStatusFailed);
+        return false;
+      }
+      if (!process->PruneThreadPlansForTID(tid)) {
+        result.AppendErrorWithFormat("Could not find unreported tid: \"%s\"\n",
+                                     args.GetArgumentAtIndex(i));
+        result.SetStatus(eReturnStatusFailed);
+        return false;
+      }
+    }
+    result.SetStatus(eReturnStatusSuccessFinishNoResult);
+    return true;
+  }
+};
+
 // CommandObjectMultiwordThreadPlan
 
 class CommandObjectMultiwordThreadPlan : public CommandObjectMultiword {
@@ -1988,6 +2104,9 @@
     LoadSubCommand(
         "discard",
         CommandObjectSP(new CommandObjectThreadPlanDiscard(interpreter)));
+    LoadSubCommand(
+        "prune",
+        CommandObjectSP(new CommandObjectThreadPlanPrune(interpreter)));
   }
 
   ~CommandObjectMultiwordThreadPlan() override = default;
Index: lldb/include/lldb/Target/ThreadPlanStack.h
===================================================================
--- lldb/include/lldb/Target/ThreadPlanStack.h
+++ lldb/include/lldb/Target/ThreadPlanStack.h
@@ -32,14 +32,14 @@
   friend class lldb_private::Thread;
 
 public:
-  ThreadPlanStack(Thread &thread) {}
+  ThreadPlanStack(const Thread &thread, bool make_null = false);
   ~ThreadPlanStack() {}
 
   enum StackKind { ePlans, eCompletedPlans, eDiscardedPlans };
 
   using PlanStack = std::vector<lldb::ThreadPlanSP>;
 
-  void DumpThreadPlans(Stream *s, lldb::DescriptionLevel desc_level,
+  void DumpThreadPlans(Stream &s, lldb::DescriptionLevel desc_level,
                        bool include_internal) const;
 
   size_t CheckpointCompletedPlans();
@@ -98,6 +98,10 @@
 private:
   const PlanStack &GetStackOfKind(ThreadPlanStack::StackKind kind) const;
 
+  void PrintOneStack(Stream &s, llvm::StringRef stack_name,
+                     const PlanStack &stack, lldb::DescriptionLevel desc_level,
+                     bool include_internal) const;
+
   PlanStack m_plans;           ///< The stack of plans this thread is executing.
   PlanStack m_completed_plans; ///< Plans that have been completed by this
                                /// stop.  They get deleted when the thread
@@ -112,9 +116,13 @@
 
 class ThreadPlanStackMap {
 public:
-  ThreadPlanStackMap() {}
+  ThreadPlanStackMap(Process &process) : m_process(process) {}
   ~ThreadPlanStackMap() {}
 
+  // Prune the map using the current_threads list.
+  void Update(ThreadList &current_threads, bool delete_missing,
+              bool check_for_new = true);
+
   void AddThread(Thread &thread) {
     lldb::tid_t tid = thread.GetID();
     auto result = m_plans_list.emplace(tid, thread);
@@ -143,7 +151,19 @@
     m_plans_list.clear();
   }
 
+  // Implements Process::DumpThreadPlans
+  void DumpPlans(Stream &strm, lldb::DescriptionLevel desc_level, bool internal,
+                 bool ignore_boring, bool skip_unreported);
+
+  // Implements Process::DumpThreadPlansForTID
+  bool DumpPlansForTID(Stream &strm, lldb::tid_t tid,
+                       lldb::DescriptionLevel desc_level, bool internal,
+                       bool ignore_boring, bool skip_unreported);
+                       
+  bool PrunePlansForTID(lldb::tid_t tid);
+
 private:
+  Process &m_process;
   using PlansList = std::unordered_map<lldb::tid_t, ThreadPlanStack>;
   PlansList m_plans_list;
 };
Index: lldb/include/lldb/Target/ThreadPlan.h
===================================================================
--- lldb/include/lldb/Target/ThreadPlan.h
+++ lldb/include/lldb/Target/ThreadPlan.h
@@ -376,7 +376,9 @@
   const Target &GetTarget() const;
 
   /// Print a description of this thread to the stream \a s.
-  /// \a thread.
+  /// Don't expect that the result of GetThread is valid in
+  /// the description method.  This might get called when the underlying
+  /// Thread has not been reported, so we only know the TID and not the thread.
   ///
   /// \param[in] s
   ///    The stream to which to print the description.
@@ -598,7 +600,9 @@
   // For ThreadPlan only
   static lldb::user_id_t GetNextID();
 
-  Thread *m_thread;
+  Thread *m_thread; // Stores a cached value of the thread, which is set to
+                    // nullptr when the thread resumes.  Don't use this anywhere
+                    // but ThreadPlan::GetThread().
   ThreadPlanKind m_kind;
   std::string m_name;
   std::recursive_mutex m_plan_complete_mutex;
Index: lldb/include/lldb/Target/Thread.h
===================================================================
--- lldb/include/lldb/Target/Thread.h
+++ lldb/include/lldb/Target/Thread.h
@@ -1019,16 +1019,6 @@
   ///    otherwise.
   bool DiscardUserThreadPlansUpToIndex(uint32_t thread_index);
 
-  /// Prints the current plan stack.
-  ///
-  /// \param[in] s
-  ///    The stream to which to dump the plan stack info.
-  ///
-  void DumpThreadPlans(
-      Stream *s,
-      lldb::DescriptionLevel desc_level = lldb::eDescriptionLevelVerbose,
-      bool include_internal = true, bool ignore_boring = false) const;
-
   virtual bool CheckpointThreadState(ThreadStateCheckpoint &saved_state);
 
   virtual bool
@@ -1186,7 +1176,7 @@
   // thread is still in good shape to call virtual thread methods.  This must
   // be called by classes that derive from Thread in their destructor.
   virtual void DestroyThread();
-  
+
   ThreadPlanStack &GetPlans() const;
 
   void PushPlan(lldb::ThreadPlanSP plan_sp);
@@ -1260,6 +1250,7 @@
   bool m_destroy_called; // This is used internally to make sure derived Thread
                          // classes call DestroyThread.
   LazyBool m_override_should_notify;
+  mutable std::unique_ptr<ThreadPlanStack> m_null_plan_stack_up;
 
 private:
   bool m_extended_info_fetched; // Have we tried to retrieve the m_extended_info
@@ -1267,7 +1258,6 @@
   StructuredData::ObjectSP m_extended_info; // The extended info for this thread
 
 private:
-  
   void BroadcastSelectedFrameChange(StackID &new_frame_id);
 
   DISALLOW_COPY_AND_ASSIGN(Thread);
Index: lldb/include/lldb/Target/Process.h
===================================================================
--- lldb/include/lldb/Target/Process.h
+++ lldb/include/lldb/Target/Process.h
@@ -73,6 +73,8 @@
   void SetExtraStartupCommands(const Args &args);
   FileSpec GetPythonOSPluginPath() const;
   void SetPythonOSPluginPath(const FileSpec &file);
+  bool GetPluginReportsAllThreads() const;
+  void SetPluginReportsAllThreads(bool stop);
   bool GetIgnoreBreakpointsInExpressions() const;
   void SetIgnoreBreakpointsInExpressions(bool ignore);
   bool GetUnwindOnErrorInExpressions() const;
@@ -2198,19 +2200,75 @@
   }
 
   void SetDynamicCheckers(DynamicCheckerFunctions *dynamic_checkers);
-  
+
+  /// Prune the ThreadPlanStack for an unreported thread.
+  ///
+  /// \param[in] tid
+  ///     The tid whose Plan Stack we are seeking to prune.
+  ///
+  /// \return
+  ///     \b true if the TID is found or \b false if not.
+  bool PruneThreadPlansForTID(lldb::tid_t tid);
+
+  /// Prune ThreadPlanStacks for all unreported threads.
+  void PruneThreadPlans();
+
   /// Find the thread plan stack associated with thread with \a tid.
   ///
   /// \param[in] tid
-  ///     The tid whose Plan Stack we are seeking..
+  ///     The tid whose Plan Stack we are seeking.
   ///
   /// \return
   ///     Returns a ThreadPlan if the TID is found or nullptr if not.
   ThreadPlanStack *FindThreadPlans(lldb::tid_t tid);
-  
-  void AddThreadPlansForThread(Thread &thread);
-  
-  void RemoveThreadPlansForTID(lldb::tid_t tid);
+
+  /// Dump the thread plans associated with thread with \a tid.
+  ///
+  /// \param[in,out] strm
+  ///     The stream to which to dump the output
+  ///
+  /// \param[in] tid
+  ///     The tid whose Plan Stack we are dumping
+  ///
+  /// \param[in] desc_level
+  ///     How much detail to dump
+  ///
+  /// \param[in] internal
+  ///     If \b true dump all plans, if false only user initiated plans
+  ///
+  /// \param[in] condense_trivial
+  ///     If true, only dump a header if the plan stack is just the base plan.
+  ///
+  /// \param[in] skip_unreported_plans
+  ///     If true, only dump a plan if it is currently backed by an
+  ///     lldb_private::Thread *.
+  ///
+  /// \return
+  ///     Returns \b true if TID was found, \b false otherwise
+  bool DumpThreadPlansForTID(Stream &strm, lldb::tid_t tid,
+                             lldb::DescriptionLevel desc_level, bool internal,
+                             bool condense_trivial, bool skip_unreported_plans);
+
+  /// Dump all the thread plans for this process.
+  ///
+  /// \param[in,out] strm
+  ///     The stream to which to dump the output
+  ///
+  /// \param[in] desc_level
+  ///     How much detail to dump
+  ///
+  /// \param[in] internal
+  ///     If \b true dump all plans, if false only user initiated plans
+  ///
+  /// \param[in] condense_trivial
+  ///     If true, only dump a header if the plan stack is just the base plan.
+  ///
+  /// \param[in] skip_unreported_plans
+  ///     If true, skip printing all thread plan stacks that don't currently
+  ///     have a backing lldb_private::Thread *.
+  void DumpThreadPlans(Stream &strm, lldb::DescriptionLevel desc_level,
+                       bool internal, bool condense_trivial,
+                       bool skip_unreported_plans);
 
   /// Call this to set the lldb in the mode where it breaks on new thread
   /// creations, and then auto-restarts.  This is useful when you are trying
@@ -2547,7 +2605,7 @@
     virtual EventActionResult HandleBeingInterrupted() = 0;
     virtual const char *GetExitString() = 0;
     void RequestResume() { m_process->m_resume_requested = true; }
-    
+
   protected:
     Process *m_process;
   };