From: Kan Liang <kan.li...@intel.com>

Iterating over all events that need to receive side-band events also
brings some overhead.

Account this overhead per perf_event_context and log it as
PERF_CORE_SB_OVERHEAD, a common (core) overhead type, when an event is
scheduled out.
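
For reference, struct perf_overhead_entry is introduced earlier in this
series. A minimal sketch of the shape this patch relies on, with the
field types assumed rather than taken from this patch:

	/* Assumed sketch; only the two fields used here are shown. */
	struct perf_overhead_entry {
		__u32	nr;	/* number of overhead occurrences */
		__u64	time;	/* accumulated time, in perf_clock() units */
	};

Each perf_iterate_sb() call bumps nr and adds the elapsed time of the
whole iteration; event_sched_out() later flushes the accumulated entry
via perf_log_overhead().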

Signed-off-by: Kan Liang <kan.li...@intel.com>
---
 include/linux/perf_event.h      |  2 ++
 include/uapi/linux/perf_event.h |  1 +
 kernel/events/core.c            | 27 ++++++++++++++++++++++++---
 3 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 351d321..fe4ca0b 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -765,6 +765,8 @@ struct perf_event_context {
 #endif
        void                            *task_ctx_data; /* pmu specific data */
        struct rcu_head                 rcu_head;
+
+       struct perf_overhead_entry      sb_overhead;
 };
 
 /*
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 355086f..bdf2eec 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -1000,6 +1000,7 @@ struct perf_branch_entry {
 enum perf_record_overhead_type {
        PERF_CORE_OVERHEAD       = 0,
        PERF_CORE_MUX_OVERHEAD   = 0,
+       PERF_CORE_SB_OVERHEAD,
 
        PERF_PMU_OVERHEAD        = 20,
        PERF_PMU_SAMPLE_OVERHEAD = 20,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 025a19d..85706fb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1830,8 +1830,12 @@ event_sched_out(struct perf_event *event,
        if (event->attr.exclusive || !cpuctx->active_oncpu)
                cpuctx->exclusive = 0;
 
-       if (log_overhead && cpuctx->mux_overhead.nr)
-               perf_log_overhead(event, PERF_CORE_MUX_OVERHEAD, &cpuctx->mux_overhead);
+       if (log_overhead) {
+               if (cpuctx->mux_overhead.nr)
+                       perf_log_overhead(event, PERF_CORE_MUX_OVERHEAD, &cpuctx->mux_overhead);
+               if (ctx->sb_overhead.nr)
+                       perf_log_overhead(event, PERF_CORE_SB_OVERHEAD, &ctx->sb_overhead);
+       }
 
        perf_pmu_enable(event->pmu);
 }
@@ -6131,6 +6135,13 @@ static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
        }
 }
 
+static void
+perf_calculate_sb_overhead(struct perf_event_context *ctx, u64 time)
+{
+       ctx->sb_overhead.nr++;
+       ctx->sb_overhead.time += time;
+}
+
 /*
  * Iterate all events that need to receive side-band events.
  *
@@ -6141,9 +6152,12 @@ static void
 perf_iterate_sb(perf_iterate_f output, void *data,
               struct perf_event_context *task_ctx)
 {
+       struct perf_event_context *overhead_ctx = task_ctx;
        struct perf_event_context *ctx;
+       u64 start_clock, end_clock;
        int ctxn;
 
+       start_clock = perf_clock();
        rcu_read_lock();
        preempt_disable();
 
@@ -6161,12 +6175,19 @@ perf_iterate_sb(perf_iterate_f output, void *data,
 
        for_each_task_context_nr(ctxn) {
                ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
-               if (ctx)
+               if (ctx) {
                        perf_iterate_ctx(ctx, output, data, false);
+                       if (!overhead_ctx)
+                               overhead_ctx = ctx;
+               }
        }
 done:
        preempt_enable();
        rcu_read_unlock();
+
+       end_clock = perf_clock();
+       if (overhead_ctx)
+               perf_calculate_sb_overhead(overhead_ctx, end_clock - start_clock);
 }
 
 /*
-- 
2.5.5
