From: "Yan, Zheng" <zheng.z....@intel.com>

The x86-specific perf event context is named x86_perf_event_context.
We can enlarge it later to store PMU-specific data.

Signed-off-by: Yan, Zheng <zheng.z....@intel.com>
---
 arch/x86/kernel/cpu/perf_event.c | 12 ++++++++++++
 arch/x86/kernel/cpu/perf_event.h |  4 ++++
 include/linux/perf_event.h       |  5 +++++
 kernel/events/core.c             | 28 ++++++++++++++++++----------
 4 files changed, 39 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index afc2413..b2eada9 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1768,6 +1768,17 @@ static int x86_pmu_event_idx(struct perf_event *event)
        return idx + 1;
 }
 
+static void *x86_pmu_event_context_alloc(struct perf_event_context *parent_ctx)
+{
+       struct perf_event_context *ctx;
+
+       ctx = kzalloc(sizeof(struct x86_perf_event_context), GFP_KERNEL);
+       if (!ctx)
+               return ERR_PTR(-ENOMEM);
+
+       return ctx;
+}
+
 static ssize_t get_attr_rdpmc(struct device *cdev,
                              struct device_attribute *attr,
                              char *buf)
@@ -1855,6 +1866,7 @@ static struct pmu pmu = {
 
        .event_idx              = x86_pmu_event_idx,
        .flush_branch_stack     = x86_pmu_flush_branch_stack,
+       .event_context_alloc    = x86_pmu_event_context_alloc,
 };
 
 void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index e14c963..08469de 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -454,6 +454,10 @@ enum {
        PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
 };
 
+struct x86_perf_event_context {
+       struct perf_event_context ctx;
+};
+
 #define x86_add_quirk(func_)                                           \
 do {                                                                   \
        static struct x86_pmu_quirk __quirk __initdata = {              \
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 50b3efd..f6d1d59 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -274,6 +274,11 @@ struct pmu {
         * flush branch stack on context-switches (needed in cpu-wide mode)
         */
        void (*flush_branch_stack)      (void);
+
+       /*
+        * Allocate a PMU-specific perf event context
+        */
+       void *(*event_context_alloc)    (struct perf_event_context *parent_ctx);
 };
 
 /**
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1db3af9..3aececc 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2961,13 +2961,20 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
 }
 
 static struct perf_event_context *
-alloc_perf_context(struct pmu *pmu, struct task_struct *task)
+alloc_perf_context(struct pmu *pmu, struct task_struct *task,
+                  struct perf_event_context *parent_ctx)
 {
        struct perf_event_context *ctx;
 
-       ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
-       if (!ctx)
-               return NULL;
+       if (pmu->event_context_alloc) {
+               ctx = pmu->event_context_alloc(parent_ctx);
+               if (IS_ERR(ctx))
+                       return ctx;
+       } else {
+               ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
+               if (!ctx)
+                       return ERR_PTR(-ENOMEM);
+       }
 
        __perf_event_init_context(ctx);
        if (task) {
@@ -3053,10 +3060,11 @@ retry:
                ++ctx->pin_count;
                raw_spin_unlock_irqrestore(&ctx->lock, flags);
        } else {
-               ctx = alloc_perf_context(pmu, task);
-               err = -ENOMEM;
-               if (!ctx)
+               ctx = alloc_perf_context(pmu, task, NULL);
+               if (IS_ERR(ctx)) {
+                       err = PTR_ERR(ctx);
                        goto errout;
+               }
 
                err = 0;
                mutex_lock(&task->perf_event_mutex);
@@ -7465,9 +7473,9 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
                 * child.
                 */
 
-               child_ctx = alloc_perf_context(event->pmu, child);
-               if (!child_ctx)
-                       return -ENOMEM;
+               child_ctx = alloc_perf_context(event->pmu, child, parent_ctx);
+               if (IS_ERR(child_ctx))
+                       return PTR_ERR(child_ctx);
 
                child->perf_event_ctxp[ctxn] = child_ctx;
        }
-- 
1.8.1.4

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to