This patch reinforces the lockdep checks performed by
perf_cgroup_from_task() by passing the perf_event_context
whenever possible. It is okay not to hold the RCU read lock
when we know we hold ctx->lock. This patch makes sure this
property holds.
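
As a rough sketch of the new convention (not part of the patch;
update_ctx_cgrp_time() is a made-up caller used only for illustration,
and the usual perf internal headers are assumed), a function that
already holds ctx->lock can pass the context and legally skip the RCU
read lock, because lockdep_is_held(&ctx->lock) satisfies the
task_css_check() condition inside the accessor:

  static void update_ctx_cgrp_time(struct perf_event_context *ctx,
                                   struct task_struct *task)
  {
          struct perf_cgroup *cgrp;

          /* ctx->lock is held, so the RCU read lock is not required;
           * lockdep_is_held(&ctx->lock) satisfies task_css_check()
           */
          lockdep_assert_held(&ctx->lock);

          cgrp = perf_cgroup_from_task(task, ctx);
          /* ... use cgrp->info for timestamp updates ... */
  }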

In some functions, such as perf_cgroup_sched_in(), we do not
pass the context because we are sure we hold the RCU read lock.
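
For the RCU-protected path the call sites then look like this
(an illustrative fragment modeled on perf_cgroup_sched_in(), not taken
verbatim from the patch):

  rcu_read_lock();
  /* no ctx needed: the RCU read lock alone satisfies task_css_check() */
  cgrp1 = perf_cgroup_from_task(task, NULL);
  cgrp2 = perf_cgroup_from_task(prev, NULL);
  rcu_read_unlock();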

Signed-off-by: Stephane Eranian <eran...@google.com>
---
 arch/x86/kernel/cpu/perf_event_intel_cqm.c |  2 +-
 include/linux/perf_event.h                 |  6 ++++--
 kernel/events/core.c                       | 20 +++++++++++++-------
 3 files changed, 18 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 377e8f8..a316ca9 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
 {
        if (event->attach_state & PERF_ATTACH_TASK)
-               return perf_cgroup_from_task(event->hw.target);
+               return perf_cgroup_from_task(event->hw.target, event->ctx);
 
        return event->cgrp;
 }
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d841d33..f9828a4 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -697,9 +697,11 @@ struct perf_cgroup {
  * if there is no cgroup event for the current CPU context.
  */
 static inline struct perf_cgroup *
-perf_cgroup_from_task(struct task_struct *task)
+perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
 {
-       return container_of(task_css(task, perf_event_cgrp_id),
+       return container_of(task_css_check(task, perf_event_cgrp_id,
+                                          ctx ? lockdep_is_held(&ctx->lock)
+                                              : true),
                            struct perf_cgroup, css);
 }
 #endif /* CONFIG_CGROUP_PERF */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ea0bdc5..3dbc3c2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -435,7 +435,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
        if (!is_cgroup_event(event))
                return;
 
-       cgrp = perf_cgroup_from_task(current);
+       cgrp = perf_cgroup_from_task(current, event->ctx);
        /*
         * Do not update time when cgroup is not active
         */
@@ -458,7 +458,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
        if (!task || !ctx->nr_cgroups)
                return;
 
-       cgrp = perf_cgroup_from_task(task);
+       cgrp = perf_cgroup_from_task(task, ctx);
        info = this_cpu_ptr(cgrp->info);
        info->timestamp = ctx->timestamp;
 }
@@ -521,8 +521,10 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
                                 * set cgrp before ctxsw in to allow
                                 * event_filter_match() to not have to pass
                                 * task around
+                                * we pass the cpuctx->ctx to perf_cgroup_from_task()
+                                * because cgroup events are only per-cpu
                                 */
-                               cpuctx->cgrp = perf_cgroup_from_task(task);
+                               cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
                                cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
                        }
                        perf_pmu_enable(cpuctx->ctx.pmu);
@@ -542,15 +544,17 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
        rcu_read_lock();
        /*
         * we come here when we know perf_cgroup_events > 0
+        * we do not need to pass the ctx here because we know
+        * we are holding the RCU read lock
         */
-       cgrp1 = perf_cgroup_from_task(task);
+       cgrp1 = perf_cgroup_from_task(task, NULL);
 
        /*
         * next is NULL when called from perf_event_enable_on_exec()
         * that will systematically cause a cgroup_switch()
         */
        if (next)
-               cgrp2 = perf_cgroup_from_task(next);
+               cgrp2 = perf_cgroup_from_task(next, NULL);
 
        /*
         * only schedule out current cgroup events if we know
@@ -572,11 +576,13 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
        rcu_read_lock();
        /*
         * we come here when we know perf_cgroup_events > 0
+        * we do not need to pass the ctx here because we know
+        * we are holding the RCU read lock
         */
-       cgrp1 = perf_cgroup_from_task(task);
+       cgrp1 = perf_cgroup_from_task(task, NULL);
 
        /* prev can never be NULL */
-       cgrp2 = perf_cgroup_from_task(prev);
+       cgrp2 = perf_cgroup_from_task(prev, NULL);
 
        /*
         * only need to schedule in cgroup events if we are changing
-- 
2.5.0
