From: Dave Hansen <dave.han...@linux.intel.com>

Use the new model number macros instead of spelling things out
in the comments.

Note that this is missing a Nehalem model that is mentioned in
intel_idle which is fixed up in a later patch.

The resulting binary (arch/x86/events/intel/core.o) is exactly
the same with and without this patch, except for some harmless
differences in how %esi is restored in the return paths of
functions, including functions untouched by this patch.

Signed-off-by: Dave Hansen <dave.han...@linux.intel.com>
Cc: Andi Kleen <a...@linux.intel.com>
Cc: Kan Liang <kan.li...@intel.com>
Cc: Stephane Eranian <eran...@google.com>
Cc: "Yan, Zheng" <zheng.z....@intel.com>
---

 b/arch/x86/events/intel/core.c |   87 ++++++++++++++++++++---------------------
 1 file changed, 44 insertions(+), 43 deletions(-)

diff -puN arch/x86/events/intel/core.c~x86-intel-families-core-events arch/x86/events/intel/core.c
--- a/arch/x86/events/intel/core.c~x86-intel-families-core-events	2016-06-01 15:45:03.301903980 -0700
+++ b/arch/x86/events/intel/core.c      2016-06-01 15:45:03.310904391 -0700
@@ -16,6 +16,7 @@
 
 #include <asm/cpufeature.h>
 #include <asm/hardirq.h>
+#include <asm/intel-family.h>
 #include <asm/apic.h>
 
 #include "../perf_event.h"
@@ -3261,11 +3262,11 @@ static int intel_snb_pebs_broken(int cpu
        u32 rev = UINT_MAX; /* default to broken for unknown models */
 
        switch (cpu_data(cpu).x86_model) {
-       case 42: /* SNB */
+       case INTEL_FAM6_MODEL_SANDYBRIDGE:
                rev = 0x28;
                break;
 
-       case 45: /* SNB-EP */
+       case INTEL_FAM6_MODEL_SANDYBRIDGE_X:
                switch (cpu_data(cpu).x86_mask) {
                case 6: rev = 0x618; break;
                case 7: rev = 0x70c; break;
@@ -3508,15 +3509,15 @@ __init int intel_pmu_init(void)
         * Install the hw-cache-events table:
         */
        switch (boot_cpu_data.x86_model) {
-       case 14: /* 65nm Core "Yonah" */
+       case INTEL_FAM6_MODEL_CORE_YONAH:
                pr_cont("Core events, ");
                break;
 
-       case 15: /* 65nm Core2 "Merom"          */
+       case INTEL_FAM6_MODEL_CORE2_MEROM:
                x86_add_quirk(intel_clovertown_quirk);
-       case 22: /* 65nm Core2 "Merom-L"        */
-       case 23: /* 45nm Core2 "Penryn"         */
-       case 29: /* 45nm Core2 "Dunnington (MP) */
+       case INTEL_FAM6_MODEL_CORE2_MEROM_L:
+       case INTEL_FAM6_MODEL_CORE2_PENRYN:
+       case INTEL_FAM6_MODEL_CORE2_DUNNINGTON:
                memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
@@ -3527,9 +3528,9 @@ __init int intel_pmu_init(void)
                pr_cont("Core2 events, ");
                break;
 
-       case 30: /* 45nm Nehalem    */
-       case 26: /* 45nm Nehalem-EP */
-       case 46: /* 45nm Nehalem-EX */
+       case INTEL_FAM6_MODEL_NEHALEM:
+       case INTEL_FAM6_MODEL_NEHALEM_EP:
+       case INTEL_FAM6_MODEL_NEHALEM_EX:
                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
@@ -3557,11 +3558,11 @@ __init int intel_pmu_init(void)
                pr_cont("Nehalem events, ");
                break;
 
-       case 28: /* 45nm Atom "Pineview"   */
-       case 38: /* 45nm Atom "Lincroft"   */
-       case 39: /* 32nm Atom "Penwell"    */
-       case 53: /* 32nm Atom "Cloverview" */
-       case 54: /* 32nm Atom "Cedarview"  */
+       case INTEL_FAM6_MODEL_ATOM_PINEVIEW:
+       case INTEL_FAM6_MODEL_ATOM_LINCROFT:
+       case INTEL_FAM6_MODEL_ATOM_PENWELL:
+       case INTEL_FAM6_MODEL_ATOM_CLOVERVIEW:
+       case INTEL_FAM6_MODEL_ATOM_CEDARVIEW:
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
@@ -3573,9 +3574,9 @@ __init int intel_pmu_init(void)
                pr_cont("Atom events, ");
                break;
 
-       case 55: /* 22nm Atom "Silvermont"                */
-       case 76: /* 14nm Atom "Airmont"                   */
-       case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
+       case INTEL_FAM6_MODEL_ATOM_SILVERMONT1:
+       case INTEL_FAM6_MODEL_ATOM_SILVERMONT2:
+       case INTEL_FAM6_MODEL_ATOM_AIRMONT:
                memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
                        sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
@@ -3590,8 +3591,8 @@ __init int intel_pmu_init(void)
                pr_cont("Silvermont events, ");
                break;
 
-       case 92: /* 14nm Atom "Goldmont" */
-       case 95: /* 14nm Atom "Goldmont Denverton" */
+       case INTEL_FAM6_MODEL_ATOM_GOLDMONT:
+       case INTEL_FAM6_MODEL_ATOM_DENVERTON:
                memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
@@ -3614,9 +3615,9 @@ __init int intel_pmu_init(void)
                pr_cont("Goldmont events, ");
                break;
 
-       case 37: /* 32nm Westmere    */
-       case 44: /* 32nm Westmere-EP */
-       case 47: /* 32nm Westmere-EX */
+       case INTEL_FAM6_MODEL_WESTMERE:
+       case INTEL_FAM6_MODEL_WESTMERE_EP:
+       case INTEL_FAM6_MODEL_WESTMERE_EX:
                memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
@@ -3643,8 +3644,8 @@ __init int intel_pmu_init(void)
                pr_cont("Westmere events, ");
                break;
 
-       case 42: /* 32nm SandyBridge         */
-       case 45: /* 32nm SandyBridge-E/EN/EP */
+       case INTEL_FAM6_MODEL_SANDYBRIDGE:
+       case INTEL_FAM6_MODEL_SANDYBRIDGE_X:
                x86_add_quirk(intel_sandybridge_quirk);
                x86_add_quirk(intel_ht_bug);
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
@@ -3657,7 +3658,7 @@ __init int intel_pmu_init(void)
                x86_pmu.event_constraints = intel_snb_event_constraints;
                x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-               if (boot_cpu_data.x86_model == 45)
+               if (boot_cpu_data.x86_model == INTEL_FAM6_MODEL_SANDYBRIDGE_X)
                        x86_pmu.extra_regs = intel_snbep_extra_regs;
                else
                        x86_pmu.extra_regs = intel_snb_extra_regs;
@@ -3679,8 +3680,8 @@ __init int intel_pmu_init(void)
                pr_cont("SandyBridge events, ");
                break;
 
-       case 58: /* 22nm IvyBridge       */
-       case 62: /* 22nm IvyBridge-EP/EX */
+       case INTEL_FAM6_MODEL_IVYBRIDGE:
+       case INTEL_FAM6_MODEL_IVYBRIDGE_X:
                x86_add_quirk(intel_ht_bug);
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
@@ -3696,7 +3697,7 @@ __init int intel_pmu_init(void)
                x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
                x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
                x86_pmu.pebs_prec_dist = true;
-               if (boot_cpu_data.x86_model == 62)
+               if (boot_cpu_data.x86_model == INTEL_FAM6_MODEL_IVYBRIDGE_X)
                        x86_pmu.extra_regs = intel_snbep_extra_regs;
                else
                        x86_pmu.extra_regs = intel_snb_extra_regs;
@@ -3714,10 +3715,10 @@ __init int intel_pmu_init(void)
                break;
 
 
-       case 60: /* 22nm Haswell Core */
-       case 63: /* 22nm Haswell Server */
-       case 69: /* 22nm Haswell ULT */
-       case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
+       case INTEL_FAM6_MODEL_HASWELL_CORE:
+       case INTEL_FAM6_MODEL_HASWELL_X:
+       case INTEL_FAM6_MODEL_HASWELL_ULT:
+       case INTEL_FAM6_MODEL_HASWELL_GT3E:
                x86_add_quirk(intel_ht_bug);
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
@@ -3741,10 +3742,10 @@ __init int intel_pmu_init(void)
                pr_cont("Haswell events, ");
                break;
 
-       case 61: /* 14nm Broadwell Core-M */
-       case 86: /* 14nm Broadwell Xeon D */
-       case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
-       case 79: /* 14nm Broadwell Server */
+       case INTEL_FAM6_MODEL_BROADWELL_CORE_M:
+       case INTEL_FAM6_MODEL_BROADWELL_XEON_D:
+       case INTEL_FAM6_MODEL_BROADWELL_GT3E:
+       case INTEL_FAM6_MODEL_BROADWELL_X:
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
@@ -3777,7 +3778,7 @@ __init int intel_pmu_init(void)
                pr_cont("Broadwell events, ");
                break;
 
-       case 87: /* Knights Landing Xeon Phi */
+       case INTEL_FAM6_MODEL_XEON_PHI_KNL:
                memcpy(hw_cache_event_ids,
                       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs,
@@ -3795,11 +3796,11 @@ __init int intel_pmu_init(void)
                pr_cont("Knights Landing events, ");
                break;
 
-       case 142: /* 14nm Kabylake Mobile */
-       case 158: /* 14nm Kabylake Desktop */
-       case 78: /* 14nm Skylake Mobile */
-       case 94: /* 14nm Skylake Desktop */
-       case 85: /* 14nm Skylake Server */
+       case INTEL_FAM6_MODEL_SKYLAKE_MOBILE:
+       case INTEL_FAM6_MODEL_SKYLAKE_DESKTOP:
+       case INTEL_FAM6_MODEL_SKYLAKE_X:
+       case INTEL_FAM6_MODEL_KABYLAKE_MOBILE:
+       case INTEL_FAM6_MODEL_KABYLAKE_DESKTOP:
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
_

Reply via email to