On Tue, Jan 19, 2021 at 12:38:23PM -0800, kan.li...@linux.intel.com wrote:
> diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
> index a54d4a9..21267dc 100644
> --- a/arch/x86/events/intel/core.c
> +++ b/arch/x86/events/intel/core.c
> @@ -5206,7 +5209,7 @@ __init int intel_pmu_init(void)
>       union cpuid10_eax eax;
>       union cpuid10_ebx ebx;
>       struct event_constraint *c;
> -     unsigned int unused;
> +     unsigned int fixed_mask;
>       struct extra_reg *er;
>       bool pmem = false;
>       int version, i;
> @@ -5228,7 +5231,7 @@ __init int intel_pmu_init(void)
>        * Check whether the Architectural PerfMon supports
>        * Branch Misses Retired hw_event or not.
>        */
> -     cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
> +     cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
>       if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
>               return -ENODEV;
>  
> @@ -5255,8 +5258,16 @@ __init int intel_pmu_init(void)
>       if (version > 1) {
>               int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
>  
> -             x86_pmu.num_counters_fixed =
> -                     max((int)edx.split.num_counters_fixed, assume);
> +             if (!fixed_mask) {
> +                     x86_pmu.num_counters_fixed =
> +                             max((int)edx.split.num_counters_fixed, assume);
> +             } else {
> +			/*
> +			 * Since v5 perfmon, the fixed-purpose counters are
> +			 * enumerated in CPUID.0xA ECX.
> +			 */
> +                     x86_pmu.num_counters_fixed = fls(fixed_mask);
> +             }
>       }
>  
>       if (version >= 4)
> @@ -5847,8 +5858,11 @@ __init int intel_pmu_init(void)
>               x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
>       }
>  
> -     x86_pmu.intel_ctrl |=
> -             ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
> +     if (!fixed_mask) {
> +             x86_pmu.intel_ctrl |=
> +			((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
> +     } else
> +             x86_pmu.intel_ctrl |= (u64)fixed_mask << INTEL_PMC_IDX_FIXED;
>  
>       /* AnyThread may be deprecated on arch perfmon v5 or later */
>       if (x86_pmu.intel_cap.anythread_deprecated)

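For anyone skimming the thread: a stand-alone user-space sketch of what the
v5 enumeration boils down to. __builtin_clz() stands in for the kernel's
fls(), and the ECX value is made up for illustration:

	#include <stdio.h>

	/* User-space stand-in for the kernel's fls(): 1-based index of
	 * the highest set bit, 0 if no bit is set. */
	static int fls_user(unsigned int x)
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	int main(void)
	{
		/* Hypothetical CPUID.0xA ECX: fixed counters 0-2 supported. */
		unsigned int fixed_mask = 0x7;

		/*
		 * v5 path: fls() yields the highest enumerated counter
		 * index plus one -- that equals the counter count only
		 * when the mask has no holes.
		 */
		printf("num_counters_fixed = %d\n", fls_user(fixed_mask));

		/* pre-v5 path: the mask is synthesized from the count. */
		printf("legacy mask = %#x\n",
		       (1u << fls_user(fixed_mask)) - 1);
		return 0;
	}
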
Maybe like so.

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index fe940082d49a..274d75d33c14 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4766,7 +4766,7 @@ __init int intel_pmu_init(void)
        union cpuid10_eax eax;
        union cpuid10_ebx ebx;
        struct event_constraint *c;
-       unsigned int unused;
+       unsigned int fixed_mask;
        struct extra_reg *er;
        bool pmem = false;
        int version, i;
@@ -4788,7 +4788,7 @@ __init int intel_pmu_init(void)
         * Check whether the Architectural PerfMon supports
         * Branch Misses Retired hw_event or not.
         */
-       cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
+       cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
        if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
                return -ENODEV;
 
@@ -4812,11 +4812,15 @@ __init int intel_pmu_init(void)
         * Quirk: v2 perfmon does not report fixed-purpose events, so
         * assume at least 3 events, when not running in a hypervisor:
         */
-       if (version > 1) {
+       if (version > 1 && version < 5) {
                int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
 
                x86_pmu.num_counters_fixed =
                        max((int)edx.split.num_counters_fixed, assume);
+
+               fixed_mask = (1L << x86_pmu.num_counters_fixed) - 1;
+	} else if (version >= 5) {
+		x86_pmu.num_counters_fixed = fls(fixed_mask);
        }
 
        if (boot_cpu_has(X86_FEATURE_PDCM)) {
@@ -5366,8 +5373,7 @@ __init int intel_pmu_init(void)
                x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
        }
 
-       x86_pmu.intel_ctrl |=
-               ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+       x86_pmu.intel_ctrl |= (u64)fixed_mask << INTEL_PMC_IDX_FIXED;
 
        /* AnyThread may be deprecated on arch perfmon v5 or later */
        if (x86_pmu.intel_cap.anythread_deprecated)

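With that shape both paths feed intel_ctrl from the same mask. A quick
stand-alone check of where the bits land (INTEL_PMC_IDX_FIXED is 32 in
arch/x86/include/asm/perf_event.h, intel_ctrl mirrors the
MSR_CORE_PERF_GLOBAL_CTRL layout, and the 3-counter value mirrors the v2
quirk's assumption):

	#include <stdio.h>
	#include <stdint.h>

	/* as in arch/x86/include/asm/perf_event.h */
	#define INTEL_PMC_IDX_FIXED 32

	int main(void)
	{
		/* v2 quirk path: assume 3 fixed counters -> contiguous mask. */
		unsigned int fixed_mask = (1u << 3) - 1;
		uint64_t intel_ctrl = 0;

		intel_ctrl |= (uint64_t)fixed_mask << INTEL_PMC_IDX_FIXED;

		/* Fixed-counter enable bits land at 32-34: 0x700000000. */
		printf("intel_ctrl = %#llx\n",
		       (unsigned long long)intel_ctrl);
		return 0;
	}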