From: David Carrillo-Cisneros <davi...@google.com>

This patch adds support to monitor a cgroup x and a task p1
simultaneously when p1 is part of cgroup x. Since we cannot write two
RMIDs to the PQR_ASSOC MSR at sched_in, the driver handles this case
explicitly: the task RMID takes precedence at sched_in, and the cgroup
read path aggregates the counts of the monitored tasks.

This patch introduces a u32 *rmid in the task_struct which keeps track
of the RMID associated with the task. There is also a list in the
arch_info of perf_cgroup, called tskmon_rlist, which keeps track of the
tasks in the cgroup that are monitored.
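
For reference, each entry on that list pairs a task's RMID pointer with
a list node. A minimal sketch matching the fields used by the code
below (the exact definition and field order in the driver may differ):

	struct tsk_rmid_entry {
		u32 *rmid;		/* points at the task's RMID */
		struct list_head list;	/* linked into tskmon_rlist */
	};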

The tskmon_rlist is modified in two scenarios:
- at event_init of task p1 which is part of a cgroup, add p1's RMID to
the cgroup's tskmon_rlist. At event_destroy, delete it from the list.
- at the time of a task move from cgroup x to cgroup y, if the task was
monitored, remove its RMID from cgroup x's tskmon_rlist and add it to
cgroup y's tskmon_rlist.

sched in: when task p1 is scheduled in, we write the task RMID to the
PQR_ASSOC MSR.
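
A minimal sketch of that sched_in path, assuming the architectural
PQR_ASSOC layout (RMID in bits 0-9, CLOSID in the upper 32 bits); the
hook name below is illustrative, and the real driver preserves the
cached CLOSID instead of writing 0:

	static void __cqm_sched_in(struct task_struct *tsk)
	{
		u32 rmid = 0;		/* RMID 0 = default/unmonitored */

		if (tsk->rmid)		/* task is monitored */
			rmid = *tsk->rmid;

		/* low 32 bits carry the RMID, high 32 bits the CLOSID */
		wrmsr(MSR_IA32_PQR_ASSOC, rmid, 0);
	}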

read (for task p1): as for any other CQM task event.

read (for cgroup x): when counting for the cgroup, the tskmon_rlist is
traversed and the counts of the corresponding RMIDs are added.

Tests: monitoring a cgroup x and a task within the cgroup x should
work.

Patch modified/refactored by Vikas Shivappa
<vikas.shiva...@linux.intel.com> to support recycling removal and the
changes in arch_info.

Signed-off-by: Vikas Shivappa <vikas.shiva...@linux.intel.com>
---
 arch/x86/events/intel/cqm.c | 137 +++++++++++++++++++++++++++++++++++++++++++-
 include/linux/sched.h       |   3 +
 2 files changed, 137 insertions(+), 3 deletions(-)

diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 506e187..8017886 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -362,6 +362,36 @@ static void init_mbm_sample(u32 *rmid, u32 evt_type)
        on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_init, &rr, 1);
 }
 
+static inline int add_cgrp_tskmon_entry(u32 *rmid, struct list_head *l)
+{
+       struct tsk_rmid_entry *entry;
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&entry->list);
+       entry->rmid = rmid;
+
+       list_add_tail(&entry->list, l);
+
+       return 0;
+}
+
+static inline void del_cgrp_tskmon_entry(u32 *rmid, struct list_head *l)
+{
+       struct tsk_rmid_entry *entry = NULL, *tmp1;
+
+       list_for_each_entry_safe(entry, tmp1, l, list) {
+               if (entry->rmid == rmid) {
+                       list_del(&entry->list);
+                       kfree(entry);
+                       break;
+               }
+       }
+}
+
 #ifdef CONFIG_CGROUP_PERF
 struct cgrp_cqm_info *cqminfo_from_tsk(struct task_struct *tsk)
 {
@@ -379,6 +409,49 @@ struct cgrp_cqm_info *cqminfo_from_tsk(struct task_struct *tsk)
 }
 #endif
 
+static inline void
+cgrp_tskmon_update(struct task_struct *tsk, u32 *rmid, bool ena)
+{
+       struct cgrp_cqm_info *ccinfo = NULL;
+
+#ifdef CONFIG_CGROUP_PERF
+       ccinfo = cqminfo_from_tsk(tsk);
+#endif
+       if (!ccinfo)
+               return;
+
+       if (ena)
+               add_cgrp_tskmon_entry(rmid, &ccinfo->tskmon_rlist);
+       else
+               del_cgrp_tskmon_entry(rmid, &ccinfo->tskmon_rlist);
+}
+
+static int cqm_assign_task_rmid(struct perf_event *event, u32 *rmid)
+{
+       struct task_struct *tsk;
+       int ret = 0;
+
+       rcu_read_lock();
+       tsk = event->hw.target;
+       if (pid_alive(tsk)) {
+               get_task_struct(tsk);
+
+               if (rmid != NULL)
+                       cgrp_tskmon_update(tsk, rmid, true);
+               else
+                       cgrp_tskmon_update(tsk, tsk->rmid, false);
+
+               tsk->rmid = rmid;
+
+               put_task_struct(tsk);
+       } else {
+               ret = -EINVAL;
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
 static inline void cqm_enable_mon(struct cgrp_cqm_info *cqm_info, u32 *rmid)
 {
        if (rmid != NULL) {
@@ -428,8 +501,12 @@ static void cqm_assign_hier_rmid(struct cgroup_subsys_state *rcss, u32 *rmid)
 
 static int cqm_assign_rmid(struct perf_event *event, u32 *rmid)
 {
+       if (is_task_event(event)) {
+               if (cqm_assign_task_rmid(event, rmid))
+                       return -EINVAL;
+       }
 #ifdef CONFIG_CGROUP_PERF
-       if (is_cgroup_event(event)) {
+       else if (is_cgroup_event(event)) {
                cqm_assign_hier_rmid(&event->cgrp->css, rmid);
        }
 #endif
@@ -630,6 +707,8 @@ static u64 cqm_read_subtree(struct perf_event *event, struct rmid_read *rr)
 
        struct cgroup_subsys_state *rcss, *pos_css;
        struct cgrp_cqm_info *ccqm_info;
+       struct tsk_rmid_entry *entry;
+       struct list_head *l;
 
        cqm_mask_call_local(rr);
        local64_set(&event->count, atomic64_read(&(rr->value)));
@@ -645,6 +724,13 @@ static u64 cqm_read_subtree(struct perf_event *event, struct rmid_read *rr)
                /* Add the descendent 'monitored cgroup' counts */
                if (pos_css != rcss && ccqm_info->mon_enabled)
                        delta_local(event, rr, ccqm_info->rmid);
+
+               /* Add our own and descendants' 'monitored task' counts */
+               if (!list_empty(&ccqm_info->tskmon_rlist)) {
+                       l = &ccqm_info->tskmon_rlist;
+                       list_for_each_entry(entry, l, list)
+                               delta_local(event, rr, entry->rmid);
+               }
        }
        rcu_read_unlock();
 #endif
@@ -1095,10 +1181,55 @@ void perf_cgroup_arch_css_free(struct cgroup_subsys_state *css)
        mutex_unlock(&cache_mutex);
 }
 
+/*
+ * Called while attaching/detaching a task to/from a cgroup.
+ */
+static bool is_task_monitored(struct task_struct *tsk)
+{
+       return (tsk->rmid != NULL);
+}
+
 void perf_cgroup_arch_attach(struct cgroup_taskset *tset)
-{}
+{
+       struct cgroup_subsys_state *new_css;
+       struct cgrp_cqm_info *cqm_info;
+       struct task_struct *task;
+
+       mutex_lock(&cache_mutex);
+
+       cgroup_taskset_for_each(task, new_css, tset) {
+               if (!is_task_monitored(task))
+                       continue;
+
+               cqm_info = cqminfo_from_tsk(task);
+               if (cqm_info)
+                       add_cgrp_tskmon_entry(task->rmid,
+                                            &cqm_info->tskmon_rlist);
+       }
+       mutex_unlock(&cache_mutex);
+}
+
 int perf_cgroup_arch_can_attach(struct cgroup_taskset *tset)
-{}
+{
+       struct cgroup_subsys_state *new_css;
+       struct cgrp_cqm_info *cqm_info;
+       struct task_struct *task;
+
+       mutex_lock(&cache_mutex);
+       cgroup_taskset_for_each(task, new_css, tset) {
+               if (!is_task_monitored(task))
+                       continue;
+               cqm_info = cqminfo_from_tsk(task);
+
+               if (cqm_info)
+                       del_cgrp_tskmon_entry(task->rmid,
+                                            &cqm_info->tskmon_rlist);
+       }
+       mutex_unlock(&cache_mutex);
+
+       return 0;
+}
 #endif
 
 static inline void cqm_pick_event_reader(int cpu)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c8f4152..a6f8060b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1794,6 +1794,9 @@ struct task_struct {
 #ifdef CONFIG_INTEL_RDT_A
        int closid;
 #endif
+#ifdef CONFIG_INTEL_RDT_M
+       u32 *rmid;
+#endif
 #ifdef CONFIG_FUTEX
        struct robust_list_head __user *robust_list;
 #ifdef CONFIG_COMPAT
-- 
1.9.1
