On Fri, Jul 17, 2020 at 07:05:47AM -0700, kan.li...@linux.intel.com wrote:
> @@ -1031,6 +1034,35 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
>       return unsched ? -EINVAL : 0;
>  }
>  
> +static int add_nr_metric_event(struct cpu_hw_events *cpuc,
> +                            struct perf_event *event,
> +                            int *max_count, bool sibling)
> +{
> +     /* The TopDown metrics events cannot be shared. */
> +     if (is_metric_event(event) &&
> +         (++cpuc->n_metric_event > INTEL_TD_METRIC_NUM)) {
> +             cpuc->n_metric_event--;
> +             return -EINVAL;
> +     }
> +
> +     /*
> +      * Take the accepted metrics events into account for leader event.
> +      */
> +     if (!sibling)
> +             *max_count += cpuc->n_metric_event;
> +     else if (is_metric_event(event))
> +             (*max_count)++;
> +
> +     return 0;
> +}
> +
> +static void del_nr_metric_event(struct cpu_hw_events *cpuc,
> +                             struct perf_event *event)
> +{
> +     if (is_metric_event(event))
> +             cpuc->n_metric_event--;
> +}
> +
>  /*
>   * dogrp: true if must collect siblings events (group)
>   * returns total number of events and error code
> @@ -1066,6 +1098,10 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
>               cpuc->pebs_output = is_pebs_pt(leader) + 1;
>       }
>  
> +     if (x86_pmu.intel_cap.perf_metrics &&
> +         add_nr_metric_event(cpuc, leader, &max_count, false))
> +             return -EINVAL;
> +
>       if (is_x86_event(leader)) {
>               if (n >= max_count)
>                       return -EINVAL;
> @@ -1082,6 +1118,10 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
>                   event->state <= PERF_EVENT_STATE_OFF)
>                       continue;
>  
> +             if (x86_pmu.intel_cap.perf_metrics &&
> +                 add_nr_metric_event(cpuc, event, &max_count, true))
> +                     return -EINVAL;
> +
>               if (n >= max_count)
>                       return -EINVAL;
>  

Something like so perhaps?

--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1035,24 +1035,14 @@ int x86_schedule_events(struct cpu_hw_ev
 }
 
 static int add_nr_metric_event(struct cpu_hw_events *cpuc,
-                              struct perf_event *event,
-                              int *max_count, bool sibling)
+                              struct perf_event *event)
 {
-       /* The TopDown metrics events cannot be shared. */
-       if (is_metric_event(event) &&
-           (++cpuc->n_metric_event > INTEL_TD_METRIC_NUM)) {
-               cpuc->n_metric_event--;
-               return -EINVAL;
+       if (is_metric_event(event)) {
+               if (cpuc->n_metric == INTEL_TD_METRIC_NUM)
+                       return -EINVAL;
+               cpuc->n_metric++;
        }
 
-       /*
-        * Take the accepted metrics events into account for leader event.
-        */
-       if (!sibling)
-               *max_count += cpuc->n_metric_event;
-       else if (is_metric_event(event))
-               (*max_count)++;
-
        return 0;
 }
 
@@ -1060,7 +1050,24 @@ static void del_nr_metric_event(struct c
                                struct perf_event *event)
 {
        if (is_metric_event(event))
-               cpuc->n_metric_event--;
+               cpuc->n_metric--;
+}
+
+static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
+                        int max_count, int n)
+{
+
+       if (x86_pmu.intel_cap.perf_metrics && add_nr_metric_event(cpuc, event))
+               return -EINVAL;
+
+       if (n >= max_count + cpuc->n_metric)
+               return -EINVAL;
+
+       cpuc->event_list[n] = event;
+       if (is_counter_pair(&event->hw))
+               cpuc->n_pair++;
+
+       return 0;
 }
 
 /*
@@ -1098,37 +1105,20 @@ static int collect_events(struct cpu_hw_
                cpuc->pebs_output = is_pebs_pt(leader) + 1;
        }
 
-       if (x86_pmu.intel_cap.perf_metrics &&
-           add_nr_metric_event(cpuc, leader, &max_count, false))
+       if (is_x86_event(leader) && collect_event(cpuc, leader, max_count, n))
                return -EINVAL;
+       n++;
 
-       if (is_x86_event(leader)) {
-               if (n >= max_count)
-                       return -EINVAL;
-               cpuc->event_list[n] = leader;
-               n++;
-               if (is_counter_pair(&leader->hw))
-                       cpuc->n_pair++;
-       }
        if (!dogrp)
                return n;
 
        for_each_sibling_event(event, leader) {
-               if (!is_x86_event(event) ||
-                   event->state <= PERF_EVENT_STATE_OFF)
-               if (!is_x86_event(event) || event->state <= PERF_EVENT_STATE_OFF)
                        continue;
 
-               if (x86_pmu.intel_cap.perf_metrics &&
-                   add_nr_metric_event(cpuc, event, &max_count, true))
-                       return -EINVAL;
-
-               if (n >= max_count)
+               if (collect_event(cpuc, event, max_count, n))
                        return -EINVAL;
-
-               cpuc->event_list[n] = event;
                n++;
-               if (is_counter_pair(&event->hw))
-                       cpuc->n_pair++;
        }
        return n;
 }
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -313,7 +313,7 @@ struct cpu_hw_events {
         * Perf Metrics
         */
        /* number of accepted metrics events */
-       int                             n_metric_event;
+       int                             n_metric;
 
        /*
         * AMD specific bits

Reply via email to