On 3/7/24 08:26, Gustavo Romero wrote:
Save target's siginfo into gdbserver_state so it can be used later, for
example, in any stub that requires the target's si_signo and si_code.
This change affects only linux-user mode.
Signed-off-by: Gustavo Romero <gustavo.rom...@linaro.org>
Suggested-by: Richard Henderson <richard.hender...@linaro.org>
---
gdbstub/internals.h | 3 +++
gdbstub/user-target.c | 3 ++-
gdbstub/user.c | 14 ++++++++++----
include/gdbstub/user.h | 6 +++++-
linux-user/main.c | 2 +-
linux-user/signal.c | 5 ++++-
6 files changed, 25 insertions(+), 8 deletions(-)
diff --git a/gdbstub/internals.h b/gdbstub/internals.h
index 56b7c13b75..a7cc69dab3 100644
--- a/gdbstub/internals.h
+++ b/gdbstub/internals.h
@@ -58,6 +58,9 @@ typedef struct GDBState {
     int line_csum; /* checksum at the end of the packet */
     GByteArray *last_packet;
     int signal;
+#ifdef CONFIG_USER_ONLY
+    uint8_t siginfo[MAX_SIGINFO_LENGTH];
+#endif
If we put this in GDBUserState in user.c -- no need for ifdefs then.
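Something like this, as a rough sketch (the real struct in user.c has more fields; siginfo_len anticipates the note further down about recording the actual length):

    /* gdbstub/user.c is user-mode only, so no CONFIG_USER_ONLY guard needed. */
    typedef struct {
        /* ... existing fields ... */
        uint8_t siginfo[MAX_SIGINFO_LENGTH];
        size_t siginfo_len;
    } GDBUserState;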
--- a/gdbstub/user-target.c
+++ b/gdbstub/user-target.c
@@ -10,11 +10,12 @@
#include "qemu/osdep.h"
#include "exec/gdbstub.h"
#include "qemu.h"
-#include "internals.h"
#ifdef CONFIG_LINUX
#include "linux-user/loader.h"
#include "linux-user/qemu.h"
+#include "gdbstub/user.h"
#endif
+#include "internals.h"
/*
* Map target signal numbers to GDB protocol signal numbers and vice
Why are any changes required here?
Perhaps this is an improper patch split from one of the others?
@@ -140,6 +141,11 @@ int gdb_handlesig(CPUState *cpu, int sig, const char *reason)
         return sig;
     }
+    if (siginfo) {
+        /* Save target-specific siginfo. */
+        memcpy(gdbserver_state.siginfo, siginfo, siginfo_len);
+    }
A comment here noting that the size is asserted at compile time elsewhere would be welcome for future code browsers.
Need to record siginfo_len for later use -- you don't want to expose all 128 bytes if the
actual structure is smaller.
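For instance (a sketch; gdbserver_user_state assumes the state moves into user.c as suggested above):

    if (siginfo) {
        /*
         * Size is checked at compile time where the caller packs the
         * target siginfo, via something like:
         *   QEMU_BUILD_BUG_ON(sizeof(target_siginfo_t) > MAX_SIGINFO_LENGTH);
         */
        memcpy(gdbserver_user_state.siginfo, siginfo, siginfo_len);
        gdbserver_user_state.siginfo_len = siginfo_len;
    }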
@@ -510,7 +516,7 @@ void gdb_breakpoint_remove_all(CPUState *cs)
 void gdb_syscall_handling(const char *syscall_packet)
 {
     gdb_put_packet(syscall_packet);
-    gdb_handlesig(gdbserver_state.c_cpu, 0, NULL);
+    gdb_handlesig(gdbserver_state.c_cpu, 0, NULL, NULL, 0);
 }
static bool should_catch_syscall(int num)
@@ -528,7 +534,7 @@ void gdb_syscall_entry(CPUState *cs, int num)
 {
     if (should_catch_syscall(num)) {
         g_autofree char *reason = g_strdup_printf("syscall_entry:%x;", num);
-        gdb_handlesig(cs, gdb_target_sigtrap(), reason);
+        gdb_handlesig(cs, gdb_target_sigtrap(), reason, NULL, 0);
     }
 }
@@ -536,7 +542,7 @@ void gdb_syscall_return(CPUState *cs, int num)
 {
     if (should_catch_syscall(num)) {
         g_autofree char *reason = g_strdup_printf("syscall_return:%x;", num);
-        gdb_handlesig(cs, gdb_target_sigtrap(), reason);
+        gdb_handlesig(cs, gdb_target_sigtrap(), reason, NULL, 0);
     }
All of this makes me wonder if we should provide a different interface for syscalls, even
if it uses the same code paths internally.
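E.g. a thin wrapper, purely a sketch (name hypothetical):

    /* Syscall stops never carry siginfo, so hide the extra arguments. */
    static void gdb_syscall_stop(CPUState *cs, const char *reason)
    {
        gdb_handlesig(cs, gdb_target_sigtrap(), reason, NULL, 0);
    }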
Do we want to zero the gdbserver siginfo to indicate that the contents are no longer
valid? I know it's not a real signal delivered to the process, but might we need to
construct a simple siginfo struct to match the sigtrap?
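Roughly (sketch only; whether TARGET_TRAP_BRKPT is the right si_code for a syscall stop is an open question):

    target_siginfo_t si = { 0 };
    si.si_signo = TARGET_SIGTRAP;
    si.si_code = TARGET_TRAP_BRKPT;
    memcpy(gdbserver_user_state.siginfo, &si, sizeof(si));
    gdbserver_user_state.siginfo_len = sizeof(si);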
r~