On Wed, 21 Aug 2024 21:27:55 +0800
Jeff Xie <jeff....@linux.dev> wrote:

Hi Jeff,

Thanks for the patch.

> Signed-off-by: Jeff Xie <jeff....@linux.dev>
> ---
>  kernel/trace/trace_functions.c | 18 ++++++++++++++++++
>  1 file changed, 18 insertions(+)
> 
> diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
> index 3b0cea37e029..273b8c7eeb2d 100644
> --- a/kernel/trace/trace_functions.c
> +++ b/kernel/trace/trace_functions.c
> @@ -176,6 +176,19 @@ static void function_trace_start(struct trace_array *tr)
>       tracing_reset_online_cpus(&tr->array_buffer);
>  }
>  
> +static unsigned long
> +function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)

I wonder if we should make this inline, or even __always_inline, as it will
be called in a very hot path and we want to make sure the compiler always
inlines it. It likely will anyway, but it doesn't hurt to give the compiler
an explicit hint.
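
Something like this perhaps (untested, just the same hunk with the attribute
added and a comment):

static __always_inline unsigned long
function_get_true_parent_ip(unsigned long parent_ip, struct ftrace_regs *fregs)
{
	unsigned long true_parent_ip;
	int idx = 0;

	true_parent_ip = parent_ip;
	/*
	 * When the function graph tracer is active, parent_ip points at
	 * return_to_handler; look up the real return address from the
	 * ret_stack instead.
	 */
	if (unlikely(parent_ip == (unsigned long)&return_to_handler))
		true_parent_ip = ftrace_graph_ret_addr(current, &idx, parent_ip,
				(unsigned long *)fregs->regs.sp);
	return true_parent_ip;
}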

-- Steve


> +{
> +     unsigned long true_parent_ip;
> +     int idx = 0;
> +
> +     true_parent_ip = parent_ip;
> +     if (unlikely(parent_ip == (unsigned long)&return_to_handler))
> +             true_parent_ip = ftrace_graph_ret_addr(current, &idx, parent_ip,
> +                             (unsigned long *)fregs->regs.sp);
> +     return true_parent_ip;
> +}
> +
>  static void
>  function_trace_call(unsigned long ip, unsigned long parent_ip,
>                   struct ftrace_ops *op, struct ftrace_regs *fregs)
> @@ -193,6 +206,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
>       if (bit < 0)
>               return;
>  
> +     parent_ip = function_get_true_parent_ip(parent_ip, fregs);
> +
>       trace_ctx = tracing_gen_ctx();
>  
>       cpu = smp_processor_id();
> @@ -241,6 +256,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
>        * recursive protection is performed.
>        */
>       local_irq_save(flags);
> +     parent_ip = function_get_true_parent_ip(parent_ip, fregs);
>       cpu = raw_smp_processor_id();
>       data = per_cpu_ptr(tr->array_buffer.data, cpu);
>       disabled = atomic_inc_return(&data->disabled);
> @@ -309,6 +325,7 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
>       if (bit < 0)
>               return;
>  
> +     parent_ip = function_get_true_parent_ip(parent_ip, fregs);
>       cpu = smp_processor_id();
>       data = per_cpu_ptr(tr->array_buffer.data, cpu);
>       if (atomic_read(&data->disabled))
> @@ -356,6 +373,7 @@ function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
>        * recursive protection is performed.
>        */
>       local_irq_save(flags);
> +     parent_ip = function_get_true_parent_ip(parent_ip, fregs);
>       cpu = raw_smp_processor_id();
>       data = per_cpu_ptr(tr->array_buffer.data, cpu);
>       disabled = atomic_inc_return(&data->disabled);

