On Wed, Mar 10, 2021 at 08:37:52AM -0800, kan.li...@linux.intel.com wrote:
> +static void init_hybrid_pmu(int cpu)
> +{
> +     unsigned int fixed_mask, unused_eax, unused_ebx, unused_edx;
> +     struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
> +     u8 cpu_type = get_hybrid_cpu_type(cpu);
> +     struct x86_hybrid_pmu *pmu = NULL;
> +     struct perf_cpu_context *cpuctx;
> +     int i;
> +
> +     for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
> +             if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) {
> +                     pmu = &x86_pmu.hybrid_pmu[i];
> +                     break;
> +             }
> +     }
> +     if (WARN_ON_ONCE(!pmu))
> +             return;
> +
> +     cpuc->pmu = &pmu->pmu;
> +
> +     /* Only register PMU for the first CPU */
> +     if (!cpumask_empty(&pmu->supported_cpus)) {
> +             cpumask_set_cpu(cpu, &pmu->supported_cpus);
> +             goto end;
> +     }
> +
> +     if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed))
> +             return;
> +
> +     if ((pmu->pmu.type == -1) &&
> +         perf_pmu_register(&pmu->pmu, pmu->name, x86_get_hybrid_pmu_type(pmu->cpu_type)))
> +             return;
> +
> +     /*
> +      * Except for ECX, other fields have been stored in the x86 struct
> +      * at boot time.
> +      */
> +     cpuid(10, &unused_eax, &unused_ebx, &fixed_mask, &unused_edx);
> +
> +     intel_pmu_check_num_counters(&pmu->num_counters,
> +                                  &pmu->num_counters_fixed,
> +                                  &pmu->intel_ctrl,
> +                                  (u64)fixed_mask);
> +
> +     pr_info("%s PMU driver: ", pmu->name);
> +
> +     if (pmu->intel_cap.perf_metrics) {
> +             pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
> +             pmu->intel_ctrl |= INTEL_PMC_MSK_FIXED_SLOTS;
> +     }
> +
> +     if (pmu->intel_cap.pebs_output_pt_available) {
> +             pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
> +             pr_cont("PEBS-via-PT ");
> +     }
> +
> +     intel_pmu_check_event_constraints(pmu->event_constraints,
> +                                       pmu->num_counters,
> +                                       pmu->num_counters_fixed,
> +                                       pmu->intel_ctrl);
> +
> +     intel_pmu_check_extra_regs(pmu->extra_regs);
> +
> +     pr_cont("\n");
> +
> +     x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed,
> +                          pmu->intel_ctrl);
> +
> +     cpumask_set_cpu(cpu, &pmu->supported_cpus);
> +end:
> +     /*
> +      * The cpuctx of all CPUs are allocated when registering the
> +      * boot CPU's PMU. At that time, the PMU for other hybrid CPUs
> +      * is not registered yet. The boot CPU's PMU was
> +      * unconditionally assigned to each cpuctx->ctx.pmu.
> +      * Update the cpuctx->ctx.pmu when the PMU for other hybrid
> +      * CPUs is known.
> +      */
> +     cpuctx = per_cpu_ptr(pmu->pmu.pmu_cpu_context, cpu);
> +     cpuctx->ctx.pmu = &pmu->pmu;
> +}
> +
>  static void intel_pmu_cpu_starting(int cpu)
>  {
>       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
>       int core_id = topology_core_id(cpu);
>       int i;
>  
> +     if (is_hybrid())
> +             init_hybrid_pmu(cpu);
> +
>       init_debug_store_on_cpu(cpu);
>       /*
>        * Deal with CPUs that don't clear their LBRs on power-up.

This is buggered. CPU_STARTING is the initial IRQs disabled part of
hotplug, but you're calling perf_pmu_register() which does mutex_lock().

Reply via email to