To support livepatch, we need to add arch_stack_walk_reliable() to
provide reliable stacktraces, per the requirements in
https://docs.kernel.org/livepatch/reliable-stacktrace.html#requirements

Report the stacktrace as unreliable if we were unable to unwind the
stack with the sframe unwinder and had to fall back to the FP-based
unwinder.

Signed-off-by: Weinan Liu <wn...@google.com>
---
 arch/arm64/include/asm/stacktrace/common.h |  2 +
 arch/arm64/kernel/stacktrace.c             | 47 +++++++++++++++++++++-
 2 files changed, 47 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h
index 19edae8a5b1a..26449cd402db 100644
--- a/arch/arm64/include/asm/stacktrace/common.h
+++ b/arch/arm64/include/asm/stacktrace/common.h
@@ -26,6 +26,7 @@ struct stack_info {
  * @stacks:      An array of stacks which can be unwound.
  * @nr_stacks:   The number of stacks in @stacks.
  * @cfa:         The sp value at the call site of the current function.
+ * @unreliable:  Stacktrace is unreliable.
  */
 struct unwind_state {
        unsigned long fp;
@@ -36,6 +37,7 @@ struct unwind_state {
        int nr_stacks;
 #ifdef CONFIG_SFRAME_UNWINDER
        unsigned long cfa;
+       bool unreliable;
 #endif
 };
 
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index c035adb8fe8a..eab16dc05bb5 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -310,11 +310,16 @@ kunwind_next(struct kunwind_state *state)
        case KUNWIND_SOURCE_TASK:
        case KUNWIND_SOURCE_REGS_PC:
 #ifdef CONFIG_SFRAME_UNWINDER
-       err = unwind_next_frame_sframe(&state->common);
+       if (!state->common.unreliable)
+               err = unwind_next_frame_sframe(&state->common);
 
        /* Fallback to FP based unwinder */
-       if (err)
+       if (err || state->common.unreliable) {
                err = kunwind_next_frame_record(state);
+               /* Mark the stacktrace result as unreliable if it was unwound via FP */
+               if (!err)
+                       state->common.unreliable = true;
+       }
 #else
        err = kunwind_next_frame_record(state);
 #endif
@@ -446,6 +451,44 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
        kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
 }
 
+#ifdef CONFIG_SFRAME_UNWINDER
+struct kunwind_reliable_consume_entry_data {
+       stack_trace_consume_fn consume_entry;
+       void *cookie;
+       bool unreliable;
+};
+
+static __always_inline bool
arch_kunwind_reliable_consume_entry(const struct kunwind_state *state, void *cookie)
+{
+       struct kunwind_reliable_consume_entry_data *data = cookie;
+
+       if (state->common.unreliable) {
+               data->unreliable = true;
+               return false;
+       }
+       return data->consume_entry(data->cookie, state->common.pc);
+}
+
+noinline notrace int arch_stack_walk_reliable(
+                               stack_trace_consume_fn consume_entry,
+                               void *cookie, struct task_struct *task)
+{
+       struct kunwind_reliable_consume_entry_data data = {
+               .consume_entry = consume_entry,
+               .cookie = cookie,
+               .unreliable = false,
+       };
+
+       kunwind_stack_walk(arch_kunwind_reliable_consume_entry, &data, task, NULL);
+
+       if (data.unreliable)
+               return -EINVAL;
+
+       return 0;
+}
+#endif
+
 struct bpf_unwind_consume_entry_data {
        bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
        void *cookie;
-- 
2.48.1.262.g85cc9f2d1e-goog


Reply via email to