On Tue, May 28, 2019 at 05:16:24PM +0200, Daniel Bristot de Oliveira wrote:
>  #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
>                               defined(CONFIG_TRACE_PREEMPT_TOGGLE))
> +
> +DEFINE_PER_CPU(int, __traced_preempt_count) = 0;
>  /*
>   * If the value passed in is equal to the current preempt count
>   * then we just disabled preemption. Start timing the latency.
>   */
>  void preempt_latency_start(int val)
>  {
> -     if (preempt_count() == val) {
> +     int curr = this_cpu_read(__traced_preempt_count);

We actually have this_cpu_add_return();
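
Untested, but with that the read and the write collapse into a single
per-cpu op -- keeping the __traced_preempt_count variable from your
patch, roughly:

/*
 * this_cpu_add_return() hands back the counter value *after* the add,
 * so "== val" means the counter was zero before we bumped it, i.e.
 * this is the outermost disable and we start timing here.
 */
void preempt_latency_start(int val)
{
	if (this_cpu_add_return(__traced_preempt_count, val) == val) {
		unsigned long ip = get_lock_parent_ip();
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = ip;
#endif
		trace_preempt_off(CALLER_ADDR0, ip);
	}
}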

> +
> +     if (!curr) {
>               unsigned long ip = get_lock_parent_ip();
>  #ifdef CONFIG_DEBUG_PREEMPT
>               current->preempt_disable_ip = ip;
>  #endif
>               trace_preempt_off(CALLER_ADDR0, ip);
>       }
> +
> +     this_cpu_write(__traced_preempt_count, curr + val);
>  }
>  
>  static inline void preempt_add_start_latency(int val)
> @@ -3200,8 +3206,12 @@ NOKPROBE_SYMBOL(preempt_count_add);
>   */
>  void preempt_latency_stop(int val)
>  {
> -     if (preempt_count() == val)
> +     int curr = this_cpu_read(__traced_preempt_count) - val;

this_cpu_sub_return();
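
Same story here, untested:

/*
 * this_cpu_sub_return() gives the value *after* the subtraction; zero
 * means this is the outermost enable, so the off-section ends here.
 */
void preempt_latency_stop(int val)
{
	if (!this_cpu_sub_return(__traced_preempt_count, val))
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
}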

> +
> +     if (!curr)
>               trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
> +
> +     this_cpu_write(__traced_preempt_count, curr);
>  }

Can't say I love this, but it is miles better than the last patch.
