Rename ctx_resched() to perf_ctx_resched() and make the function
globally accessible, in preparation for the next patch, which needs
to call it from arch-specific code.

Signed-off-by: Stephane Eranian <eran...@google.com>
---
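[Note, not part of the commit: the next patch wants to force a
reschedule of events from arch code, e.g. after some PMU-wide state
has changed behind perf's back. A minimal sketch of what such a
caller could look like, assuming it runs on the target CPU (for
instance from an on_each_cpu() callback, so with interrupts disabled)
and takes the context locks the way core.c's static perf_ctx_lock()
does. The callback name and trigger are hypothetical:

#include <linux/perf_event.h>

/* hypothetical: run on each CPU after arch PMU state has changed */
static void arch_pmu_state_changed(void *info)
{
        struct pmu *pmu = info;
        struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
        struct perf_event_context *task_ctx = cpuctx->task_ctx;

        /* open-coded equivalent of core.c's static perf_ctx_lock() */
        raw_spin_lock(&cpuctx->ctx.lock);
        if (task_ctx)
                raw_spin_lock(&task_ctx->lock);

        /* reschedule both CPU and task events so new constraints apply */
        perf_ctx_resched(cpuctx, task_ctx, EVENT_ALL | EVENT_CPU);

        if (task_ctx)
                raw_spin_unlock(&task_ctx->lock);
        raw_spin_unlock(&cpuctx->ctx.lock);
}
]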
 include/linux/perf_event.h | 12 ++++++++++++
 kernel/events/core.c       | 21 ++++++---------------
 2 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 514de997696b..150cfd493ad2 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -829,6 +829,15 @@ struct bpf_perf_event_data_kern {
        struct perf_event *event;
 };
 
+enum event_type_t {
+       EVENT_FLEXIBLE = 0x1,
+       EVENT_PINNED = 0x2,
+       EVENT_TIME = 0x4,
+       /* see perf_ctx_resched() for details */
+       EVENT_CPU = 0x8,
+       EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
 #ifdef CONFIG_CGROUP_PERF
 
 /*
@@ -895,6 +904,9 @@ extern void perf_sched_cb_dec(struct pmu *pmu);
 extern void perf_sched_cb_inc(struct pmu *pmu);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
+extern void perf_ctx_resched(struct perf_cpu_context *cpuctx,
+                       struct perf_event_context *task_ctx,
+                       enum event_type_t event_type);
 extern int perf_event_refresh(struct perf_event *event, int refresh);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 429bf6d8be95..48b955a2b7f1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -338,15 +338,6 @@ static void event_function_local(struct perf_event *event, event_f func, void *d
        (PERF_SAMPLE_BRANCH_KERNEL |\
         PERF_SAMPLE_BRANCH_HV)
 
-enum event_type_t {
-       EVENT_FLEXIBLE = 0x1,
-       EVENT_PINNED = 0x2,
-       EVENT_TIME = 0x4,
-       /* see ctx_resched() for details */
-       EVENT_CPU = 0x8,
-       EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
-};
-
 /*
  * perf_sched_events : >0 events exist
  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
@@ -2430,9 +2421,9 @@ static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
  * event_type is a bit mask of the types of events involved. For CPU events,
  * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE.
  */
-static void ctx_resched(struct perf_cpu_context *cpuctx,
-                       struct perf_event_context *task_ctx,
-                       enum event_type_t event_type)
+void perf_ctx_resched(struct perf_cpu_context *cpuctx,
+                     struct perf_event_context *task_ctx,
+                     enum event_type_t event_type)
 {
        enum event_type_t ctx_event_type;
        bool cpu_event = !!(event_type & EVENT_CPU);
@@ -2520,7 +2511,7 @@ static int  __perf_install_in_context(void *info)
        if (reprogram) {
                ctx_sched_out(ctx, cpuctx, EVENT_TIME);
                add_event_to_ctx(event, ctx);
-               ctx_resched(cpuctx, task_ctx, get_event_type(event));
+               perf_ctx_resched(cpuctx, task_ctx, get_event_type(event));
        } else {
                add_event_to_ctx(event, ctx);
        }
@@ -2664,7 +2655,7 @@ static void __perf_event_enable(struct perf_event *event,
        if (ctx->task)
                WARN_ON_ONCE(task_ctx != ctx);
 
-       ctx_resched(cpuctx, task_ctx, get_event_type(event));
+       perf_ctx_resched(cpuctx, task_ctx, get_event_type(event));
 }
 
 /*
@@ -3782,7 +3773,7 @@ static void perf_event_enable_on_exec(int ctxn)
         */
        if (enabled) {
                clone_ctx = unclone_ctx(ctx);
-               ctx_resched(cpuctx, ctx, event_type);
+               perf_ctx_resched(cpuctx, ctx, event_type);
        } else {
                ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
        }
-- 
2.21.0.392.gf8f6787159e-goog
