> diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
> index a9f9c57..101e1fb 100644
> --- a/include/linux/trace_recursion.h
> +++ b/include/linux/trace_recursion.h
> @@ -208,13 +208,29 @@ static __always_inline void trace_clear_recursion(int 
> bit)
>   * Use this for ftrace callbacks. This will detect if the function
>   * tracing recursed in the same context (normal vs interrupt),
>   *
> + * The ftrace_test_recursion_trylock() will disable preemption,
> + * which is required for the variant of synchronize_rcu() that is
> + * used to allow patching functions where RCU is not watching.
> + * See klp_synchronize_transition() for more details.
> + *

I think that you misunderstood. Steven proposed to put the comment before 
the ftrace_test_recursion_trylock() call site in klp_ftrace_handler().

>   * Returns: -1 if a recursion happened.
>   *           >= 0 if no recursion
>   */
>  static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
>                                                        unsigned long 
> parent_ip)
>  {
> -     return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START, 
> TRACE_FTRACE_MAX);
> +     int bit;
> +
> +     bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START, 
> TRACE_FTRACE_MAX);
> +     /*
> +      * The zero bit indicates we are nested
> +      * in another trylock(), which means
> +      * preemption is already disabled.
> +      */
> +     if (bit > 0)
> +             preempt_disable_notrace();
> +
> +     return bit;
>  }

[...]

> diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
> index e8029ae..6e66ccd 100644
> --- a/kernel/livepatch/patch.c
> +++ b/kernel/livepatch/patch.c
> @@ -52,11 +52,6 @@ static void notrace klp_ftrace_handler(unsigned long ip,

Here

>       bit = ftrace_test_recursion_trylock(ip, parent_ip);
>       if (WARN_ON_ONCE(bit < 0))
>               return;
> -     /*
> -      * A variant of synchronize_rcu() is used to allow patching functions
> -      * where RCU is not watching, see klp_synchronize_transition().
> -      */
> -     preempt_disable_notrace();
> 
>       func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
>                                     stack_node);
> @@ -120,7 +115,6 @@ static void notrace klp_ftrace_handler(unsigned long ip,
>       klp_arch_set_pc(fregs, (unsigned long)func->new_func);
> 
>  unlock:
> -     preempt_enable_notrace();
>       ftrace_test_recursion_unlock(bit);
>  }

Side note... the comment will eventually conflict with peterz's 
https://lore.kernel.org/all/20210929152429.125997...@infradead.org/.

Miroslav

Reply via email to