================
@@ -1196,6 +1202,62 @@ def terminate(self):
         self.process.wait()
         self.process = None
 
+    @classmethod
+    def launch(
+        cls, executable: str, /, connection=None, log_file=None, env=None
+    ) -> tuple[subprocess.Popen, str]:
+        adaptor_env = os.environ.copy()
+        if env:
+            adaptor_env.update(env)
+
+        if log_file:
+            adaptor_env["LLDBDAP_LOG"] = log_file
+
+        if os.uname().sysname == "Darwin":
+            adaptor_env["NSUnbufferedIO"] = "YES"
+
+        args = [executable]
+        if connection:
+            args.append("--connection")
+            args.append(connection)
+
+        proc = subprocess.Popen(
+            args,
+            stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            stderr=sys.stdout,
+            env=adaptor_env,
+        )
+
+        if connection:
+            # If a connection is specified, lldb-dap will print the listening
+            # address to stdout once the listener is created. The address is
+            # formatted like `tcp://host:port` or `unix:///path`.
+            with selectors.DefaultSelector() as sel:
+                print("Reading stdout for the listening connection")
+                os.set_blocking(proc.stdout.fileno(), False)
+                stdout_key = sel.register(proc.stdout, selectors.EVENT_READ)
+                rdy_fds = sel.select(timeout=10.0)
----------------

labath wrote:

It looks like this will be a problem for Windows (which can only `select` sockets). I can think of a couple of alternatives (writing the connection spec to a file and polling for it; using OS APIs directly -- we have a `Pipe` class in `lldbgdbserverutils.py` for the lldb-server use case; doing a blocking read and relying on the test-suite-level timeout; ...), but none of them is clearly superior.

https://github.com/llvm/llvm-project/pull/116392
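
For illustration, a minimal sketch of the "blocking read" alternative mentioned above, with the timeout enforced by a watchdog thread instead of `select`. The helper name `read_listening_address` and the watchdog approach are assumptions made for this sketch, not what the patch does; it relies only on the behavior described in the diff's comment (lldb-dap printing a `tcp://...` or `unix://...` line to stdout once its listener is up).

```python
import subprocess
import threading


def read_listening_address(proc: subprocess.Popen, timeout: float = 10.0) -> str:
    """Hypothetical helper: blockingly read the address line lldb-dap prints.

    Avoids select() and non-blocking pipe reads entirely, which matters on
    Windows where select() only accepts sockets.
    """
    # If the adapter never prints its address, kill it so readline() returns
    # (at EOF) instead of hanging until the test-suite timeout.
    watchdog = threading.Timer(timeout, proc.kill)
    watchdog.start()
    try:
        # proc.stdout was opened without text=True, so this returns bytes;
        # an empty result means EOF (the process exited or was killed).
        line = proc.stdout.readline()
    finally:
        watchdog.cancel()
    if not line:
        raise RuntimeError("lldb-dap exited before printing its listening address")
    return line.decode().strip()
```

Dropped in place of the `selectors` block, this would keep a 10-second bound while staying portable; the cost is that a wedged adapter is only detected when the watchdog (or the test-suite timeout) fires, which is essentially the trade-off described above.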