On Mon, Jun 30, 2014 at 04:50:39PM +0800, Yan, Zheng wrote:
> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
> index 707617a..71fb77a 100644
> --- a/include/linux/perf_event.h
> +++ b/include/linux/perf_event.h
> @@ -262,6 +262,12 @@ struct pmu {
>        * flush branch stack on context-switches (needed in cpu-wide mode)
>        */
>       void (*flush_branch_stack)      (void);
> +
> +     /*
> +      * PMU callback for context-switches. optional
> +      */
> +     void (*sched_task)              (struct perf_event_context *ctx,
> +                                      bool sched_in);
>  };

Here you make it a general facility — any PMU can provide a sched_task callback.

> +/*
> + * This function provides the context switch callback to the lower code
> + * layer. It is invoked ONLY when the context switch callback is enabled.
> + */
> +static void perf_pmu_sched_task(struct task_struct *prev,
> +                             struct task_struct *next,
> +                             bool sched_in)
> +{
> +     struct perf_cpu_context *cpuctx;
> +     struct pmu *pmu;
> +     unsigned long flags;
> +
> +     if (prev == next)
> +             return;
> +
> +     local_irq_save(flags);
> +
> +     rcu_read_lock();
> +
> +     list_for_each_entry_rcu(pmu, &pmus, entry) {
> +             if (pmu->sched_task) {
> +                     cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
> +
> +                     perf_ctx_lock(cpuctx, cpuctx->task_ctx);
> +
> +                     perf_pmu_disable(pmu);
> +
> +                     pmu->sched_task(cpuctx->task_ctx, sched_in);
> +
> +                     perf_pmu_enable(pmu);
> +
> +                     perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
> +                     /* only CPU PMU has context switch callback */
> +                     break;

And here you break it: the unconditional break means only the first PMU on the list with a sched_task callback is ever invoked, so the facility isn't general after all.

> +             }
> +     }
> +
> +     rcu_read_unlock();
> +
> +     local_irq_restore(flags);
> +}

Attachment: pgpANk2o22I_r.pgp
Description: PGP signature

Reply via email to