This patch adds perf_ctx_resched(), a global function that can be called to force a reschedule of events for the given event types (pinned, flexible, per-CPU). The function locks both cpuctx and task_ctx internally. It will be used by a subsequent patch.
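
For illustration only, a caller added by a later patch could look roughly like the sketch below; the enclosing function name and the exact event_type mask are made up for this example and are not part of this patch:

/*
 * Hypothetical caller (illustration only): force a reschedule of all
 * pinned, flexible and per-CPU events on the local CPU for the given
 * PMU. perf_ctx_resched() takes cpuctx->ctx.lock and, when task_ctx is
 * non-NULL, task_ctx->lock internally, so the caller holds no ctx locks.
 */
static void example_resched_local_pmu(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

	perf_ctx_resched(cpuctx, cpuctx->task_ctx, EVENT_ALL | EVENT_CPU);
}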
Signed-off-by: Stephane Eranian <eran...@google.com>
Change-Id: Icbc05e5f461fd6e091b46778fe62b23f308e2be7
---
 include/linux/perf_event.h | 14 ++++++++++++++
 kernel/events/core.c       | 18 +++++++++---------
 2 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 085a95e2582a..ee8a275df0ed 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -822,6 +822,15 @@ struct bpf_perf_event_data_kern {
 	struct perf_event *event;
 };
 
+enum event_type_t {
+	EVENT_FLEXIBLE = 0x1,
+	EVENT_PINNED = 0x2,
+	EVENT_TIME = 0x4,
+	/* see ctx_resched() for details */
+	EVENT_CPU = 0x8,
+	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
 #ifdef CONFIG_CGROUP_PERF
 
 /*
@@ -888,6 +897,11 @@ extern void perf_sched_cb_dec(struct pmu *pmu);
 extern void perf_sched_cb_inc(struct pmu *pmu);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
+
+extern void perf_ctx_resched(struct perf_cpu_context *cpuctx,
+			     struct perf_event_context *task_ctx,
+			     enum event_type_t event_type);
+
 extern int perf_event_refresh(struct perf_event *event, int refresh);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dfc4bab0b02b..30474064ec22 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -354,15 +354,6 @@ static void event_function_local(struct perf_event *event, event_f func, void *d
 	(PERF_SAMPLE_BRANCH_KERNEL |\
 	 PERF_SAMPLE_BRANCH_HV)
 
-enum event_type_t {
-	EVENT_FLEXIBLE = 0x1,
-	EVENT_PINNED = 0x2,
-	EVENT_TIME = 0x4,
-	/* see ctx_resched() for details */
-	EVENT_CPU = 0x8,
-	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
-};
-
 /*
  * perf_sched_events : >0 events exist
  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
@@ -2477,6 +2468,15 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
 	perf_pmu_enable(cpuctx->ctx.pmu);
 }
 
+void perf_ctx_resched(struct perf_cpu_context *cpuctx,
+		      struct perf_event_context *task_ctx,
+		      enum event_type_t event_type)
+{
+	perf_ctx_lock(cpuctx, task_ctx);
+	ctx_resched(cpuctx, task_ctx, event_type);
+	perf_ctx_unlock(cpuctx, task_ctx);
+}
+
 /*
  * Cross CPU call to install and enable a performance event
  *
-- 
2.21.0.392.gf8f6787159e-goog