These MSRs, despite being Intel specific, are used to offer virtualised
CPUID faulting support on AMD hardware as well, so drop the INTEL infix.

The bit position constants are only used by guest_rdmsr(), and that logic
can be expressed with MASK_INSR() instead, allowing the constants to be
dropped.

Signed-off-by: Andrew Cooper <andrew.coop...@citrix.com>
---
CC: Jan Beulich <jbeul...@suse.com>
CC: Wei Liu <wei.l...@citrix.com>
CC: Roger Pau Monné <roger....@citrix.com>
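
For reference, MASK_INSR() derives the shift amount from the lowest set
bit of the mask, which is what makes the explicit bit position constants
redundant.  A minimal standalone sketch of the transformation follows
(the MASK_INSR() definition is assumed to match the one in
xen/include/xen/lib.h; the mask constant is copied from this patch):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Assumed to match the definition in xen/include/xen/lib.h. */
  #define MASK_INSR(v, m) (((v) * ((m) & -(m))) & (m))

  /* Mask constant as introduced by this patch. */
  #define PLATFORM_INFO_CPUID_FAULTING (1ULL << 31)

  int main(void)
  {
      bool cpuid_faulting = true;

      /* Old form: value shifted by a separate bit position constant. */
      uint64_t before = (uint64_t)cpuid_faulting << 31;

      /* New form: the shift is recovered from the mask itself. */
      uint64_t after = MASK_INSR((uint64_t)cpuid_faulting,
                                 PLATFORM_INFO_CPUID_FAULTING);

      /* Both forms print 0x80000000. */
      printf("before=%#llx after=%#llx\n",
             (unsigned long long)before, (unsigned long long)after);

      return before == after ? 0 : 1;
  }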
---
 xen/arch/x86/cpu/common.c              | 16 +++++++---------
 xen/arch/x86/domctl.c                  |  4 ++--
 xen/arch/x86/hvm/hvm.c                 |  4 ++--
 xen/arch/x86/msr.c                     | 30 +++++++++++++++---------------
 xen/arch/x86/x86_emulate/x86_emulate.c |  6 +++---
 xen/include/asm-x86/msr-index.h        | 15 ++++++---------
 xen/include/asm-x86/msr.h              |  4 ++--
 7 files changed, 37 insertions(+), 42 deletions(-)

diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 3548b12..a83077f 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -115,19 +115,17 @@ bool __init probe_cpuid_faulting(void)
        uint64_t val;
        int rc;
 
-       if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0)
+       if ((rc = rdmsr_safe(MSR_PLATFORM_INFO, val)) == 0)
        {
                struct msr_domain_policy *dp = &raw_msr_domain_policy;
 
                dp->plaform_info.available = true;
-               if (val & MSR_PLATFORM_INFO_CPUID_FAULTING)
+               if (val & PLATFORM_INFO_CPUID_FAULTING)
                        dp->plaform_info.cpuid_faulting = true;
        }
 
-       if (rc ||
-           !(val & MSR_PLATFORM_INFO_CPUID_FAULTING) ||
-           rdmsr_safe(MSR_INTEL_MISC_FEATURES_ENABLES,
-                      this_cpu(msr_misc_features)))
+       if (rc || !(val & PLATFORM_INFO_CPUID_FAULTING) ||
+           rdmsr_safe(MSR_MISC_FEATURES_ENABLES, this_cpu(msr_misc_features)))
        {
                setup_clear_cpu_cap(X86_FEATURE_CPUID_FAULTING);
                return false;
@@ -145,12 +143,12 @@ static void set_cpuid_faulting(bool enable)
        uint64_t *this_misc_features = &this_cpu(msr_misc_features);
        uint64_t val = *this_misc_features;
 
-       if (!!(val & MSR_MISC_FEATURES_CPUID_FAULTING) == enable)
+       if (!!(val & MISC_FEATURES_CPUID_FAULTING) == enable)
                return;
 
-       val ^= MSR_MISC_FEATURES_CPUID_FAULTING;
+       val ^= MISC_FEATURES_CPUID_FAULTING;
 
-       wrmsrl(MSR_INTEL_MISC_FEATURES_ENABLES, val);
+       wrmsrl(MSR_MISC_FEATURES_ENABLES, val);
        *this_misc_features = val;
 }
 
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 8fbbf3a..6bbde04 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1248,7 +1248,7 @@ long arch_do_domctl(
         struct vcpu *v;
         static const uint32_t msrs_to_send[] = {
             MSR_SPEC_CTRL,
-            MSR_INTEL_MISC_FEATURES_ENABLES,
+            MSR_MISC_FEATURES_ENABLES,
         };
         uint32_t nr_msrs = ARRAY_SIZE(msrs_to_send);
 
@@ -1372,7 +1372,7 @@ long arch_do_domctl(
                 switch ( msr.index )
                 {
                 case MSR_SPEC_CTRL:
-                case MSR_INTEL_MISC_FEATURES_ENABLES:
+                case MSR_MISC_FEATURES_ENABLES:
                     if ( guest_wrmsr(v, msr.index, msr.value) != X86EMUL_OKAY )
                         break;
                     continue;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 4e247d0..5823620 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1335,7 +1335,7 @@ static int hvm_load_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
 #define HVM_CPU_MSR_SIZE(cnt) offsetof(struct hvm_msr, msr[cnt])
 static const uint32_t msrs_to_send[] = {
     MSR_SPEC_CTRL,
-    MSR_INTEL_MISC_FEATURES_ENABLES,
+    MSR_MISC_FEATURES_ENABLES,
 };
 static unsigned int __read_mostly msr_count_max = ARRAY_SIZE(msrs_to_send);
 
@@ -1471,7 +1471,7 @@ static int hvm_load_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
             int rc;
 
         case MSR_SPEC_CTRL:
-        case MSR_INTEL_MISC_FEATURES_ENABLES:
+        case MSR_MISC_FEATURES_ENABLES:
             rc = guest_wrmsr(v, ctxt->msr[i].index, ctxt->msr[i].val);
 
             if ( rc != X86EMUL_OKAY )
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 1e12ccb..0162890 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -36,7 +36,7 @@ struct msr_vcpu_policy __read_mostly hvm_max_msr_vcpu_policy,
 
 static void __init calculate_raw_policy(void)
 {
-    /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
+    /* 0x000000ce - MSR_PLATFORM_INFO */
     /* Was already added by probe_cpuid_faulting() */
 }
 
@@ -46,7 +46,7 @@ static void __init calculate_host_policy(void)
 
     *dp = raw_msr_domain_policy;
 
-    /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
+    /* 0x000000ce - MSR_PLATFORM_INFO */
     /* probe_cpuid_faulting() sanity checks presence of MISC_FEATURES_ENABLES */
     dp->plaform_info.cpuid_faulting = cpu_has_cpuid_faulting;
 }
@@ -61,7 +61,7 @@ static void __init calculate_hvm_max_policy(void)
 
     *dp = host_msr_domain_policy;
 
-    /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
+    /* 0x000000ce - MSR_PLATFORM_INFO */
     /* It's always possible to emulate CPUID faulting for HVM guests */
     if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
          boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
@@ -70,7 +70,7 @@ static void __init calculate_hvm_max_policy(void)
         dp->plaform_info.cpuid_faulting = true;
     }
 
-    /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
+    /* 0x00000140 - MSR_MISC_FEATURES_ENABLES */
     vp->misc_features_enables.available = dp->plaform_info.cpuid_faulting;
 }
 
@@ -81,7 +81,7 @@ static void __init calculate_pv_max_policy(void)
 
     *dp = host_msr_domain_policy;
 
-    /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
+    /* 0x00000140 - MSR_MISC_FEATURES_ENABLES */
     vp->misc_features_enables.available = dp->plaform_info.cpuid_faulting;
 }
 
@@ -159,22 +159,22 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
         *val = vp->spec_ctrl.raw;
         break;
 
-    case MSR_INTEL_PLATFORM_INFO:
+    case MSR_PLATFORM_INFO:
         if ( !dp->plaform_info.available )
             goto gp_fault;
-        *val = (uint64_t)dp->plaform_info.cpuid_faulting <<
-               _MSR_PLATFORM_INFO_CPUID_FAULTING;
+        *val = MASK_INSR(dp->plaform_info.cpuid_faulting,
+                         PLATFORM_INFO_CPUID_FAULTING);
         break;
 
     case MSR_ARCH_CAPABILITIES:
         /* Not implemented yet. */
         goto gp_fault;
 
-    case MSR_INTEL_MISC_FEATURES_ENABLES:
+    case MSR_MISC_FEATURES_ENABLES:
         if ( !vp->misc_features_enables.available )
             goto gp_fault;
-        *val = (uint64_t)vp->misc_features_enables.cpuid_faulting <<
-               _MSR_MISC_FEATURES_CPUID_FAULTING;
+        *val = MASK_INSR(vp->misc_features_enables.cpuid_faulting,
+                         MISC_FEATURES_CPUID_FAULTING);
         break;
 
     default:
@@ -199,7 +199,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
     {
         uint64_t rsvd;
 
-    case MSR_INTEL_PLATFORM_INFO:
+    case MSR_PLATFORM_INFO:
     case MSR_ARCH_CAPABILITIES:
         /* Read-only */
         goto gp_fault;
@@ -254,7 +254,7 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
             wrmsrl(MSR_PRED_CMD, val);
         break;
 
-    case MSR_INTEL_MISC_FEATURES_ENABLES:
+    case MSR_MISC_FEATURES_ENABLES:
     {
         bool old_cpuid_faulting = vp->misc_features_enables.cpuid_faulting;
 
@@ -263,13 +263,13 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
 
         rsvd = ~0ull;
         if ( dp->plaform_info.cpuid_faulting )
-            rsvd &= ~MSR_MISC_FEATURES_CPUID_FAULTING;
+            rsvd &= ~MISC_FEATURES_CPUID_FAULTING;
 
         if ( val & rsvd )
             goto gp_fault;
 
         vp->misc_features_enables.cpuid_faulting =
-            val & MSR_MISC_FEATURES_CPUID_FAULTING;
+            val & MISC_FEATURES_CPUID_FAULTING;
 
         if ( v == curr && is_hvm_domain(d) && cpu_has_cpuid_faulting &&
              (old_cpuid_faulting ^ vp->misc_features_enables.cpuid_faulting) )
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index e372c4b..1a84c90 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -6525,9 +6525,9 @@ x86_emulate(
         msr_val = 0;
         fail_if(ops->cpuid == NULL);
 
-        /* Speculatively read MSR_INTEL_MISC_FEATURES_ENABLES. */
+        /* Speculatively read MSR_MISC_FEATURES_ENABLES. */
         if ( ops->read_msr && !mode_ring0() &&
-             (rc = ops->read_msr(MSR_INTEL_MISC_FEATURES_ENABLES,
+             (rc = ops->read_msr(MSR_MISC_FEATURES_ENABLES,
                                  &msr_val, ctxt)) == X86EMUL_EXCEPTION )
         {
             /* Not implemented.  Squash the exception and proceed normally. */
@@ -6537,7 +6537,7 @@ x86_emulate(
         if ( rc != X86EMUL_OKAY )
             goto done;
 
-        generate_exception_if((msr_val & MSR_MISC_FEATURES_CPUID_FAULTING),
+        generate_exception_if(msr_val & MISC_FEATURES_CPUID_FAULTING,
                               EXC_GP, 0); /* Faulting active? (Inc. CPL test) */
 
         rc = ops->cpuid(_regs.eax, _regs.ecx, &cpuid_leaf, ctxt);
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 2c9b75f..48d80e9 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -24,12 +24,18 @@
 #define MSR_PRED_CMD                    0x00000049
 #define PRED_CMD_IBPB                   (_AC(1, ULL) <<  0)
 
+#define MSR_PLATFORM_INFO               0x000000ce
+#define PLATFORM_INFO_CPUID_FAULTING    (_AC(1, ULL) << 31)
+
 #define MSR_ARCH_CAPABILITIES           0x0000010a
 #define ARCH_CAPS_RDCL_NO               (_AC(1, ULL) <<  0)
 #define ARCH_CAPS_IBRS_ALL              (_AC(1, ULL) <<  1)
 #define ARCH_CAPS_RSBA                  (_AC(1, ULL) <<  2)
 #define ARCH_CAPS_SSB_NO                (_AC(1, ULL) <<  4)
 
+#define MSR_MISC_FEATURES_ENABLES       0x00000140
+#define MISC_FEATURES_CPUID_FAULTING    (_AC(1, ULL) <<  0)
+
 #define MSR_EFER                        0xc0000080 /* Extended Feature Enable Register */
 #define EFER_SCE                        (_AC(1, ULL) <<  0) /* SYSCALL Enable */
 #define EFER_LME                        (_AC(1, ULL) <<  8) /* Long Mode Enable */
@@ -534,15 +540,6 @@
 #define MSR_INTEL_MASK_V3_CPUID80000001 0x00000133
 #define MSR_INTEL_MASK_V3_CPUIDD_01     0x00000134
 
-/* Intel cpuid faulting MSRs */
-#define MSR_INTEL_PLATFORM_INFO                0x000000ce
-#define _MSR_PLATFORM_INFO_CPUID_FAULTING      31
-#define MSR_PLATFORM_INFO_CPUID_FAULTING       (1ULL << _MSR_PLATFORM_INFO_CPUID_FAULTING)
-
-#define MSR_INTEL_MISC_FEATURES_ENABLES        0x00000140
-#define _MSR_MISC_FEATURES_CPUID_FAULTING      0
-#define MSR_MISC_FEATURES_CPUID_FAULTING       (1ULL << _MSR_MISC_FEATURES_CPUID_FAULTING)
-
 #define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668
 #define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669
 
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index afbeb7f..8f9f964 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -260,7 +260,7 @@ static inline void wrmsr_tsc_aux(uint32_t val)
 /* MSR policy object for shared per-domain MSRs */
 struct msr_domain_policy
 {
-    /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
+    /* 0x000000ce - MSR_PLATFORM_INFO */
     struct {
         bool available; /* This MSR is non-architectural */
         bool cpuid_faulting;
@@ -288,7 +288,7 @@ struct msr_vcpu_policy
         uint32_t raw;
     } spec_ctrl;
 
-    /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
+    /* 0x00000140 - MSR_MISC_FEATURES_ENABLES */
     struct {
         bool available; /* This MSR is non-architectural */
         bool cpuid_faulting;
-- 
2.1.4

