From: Dapeng Mi <dapeng1...@linux.intel.com>

Move PMU_CAP_{FW_WRITES,LBR_FMT} into msr-index.h and rename them with the
PERF_CAP prefix to be consistent with the other perf capability macros.

No functional change intended.

Signed-off-by: Dapeng Mi <dapeng1...@linux.intel.com>
Signed-off-by: Mingwei Zhang <mizh...@google.com>
---
 arch/x86/include/asm/msr-index.h | 15 +++++++++------
 arch/x86/kvm/vmx/capabilities.h  |  3 ---
 arch/x86/kvm/vmx/pmu_intel.c     |  4 ++--
 arch/x86/kvm/vmx/vmx.c           | 12 ++++++------
 4 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 72765b2fe0d8..ca70846ffd55 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -305,12 +305,15 @@
 #define PERF_CAP_PT_IDX                        16
 
 #define MSR_PEBS_LD_LAT_THRESHOLD      0x000003f6
-#define PERF_CAP_PEBS_TRAP             BIT_ULL(6)
-#define PERF_CAP_ARCH_REG              BIT_ULL(7)
-#define PERF_CAP_PEBS_FORMAT           0xf00
-#define PERF_CAP_PEBS_BASELINE         BIT_ULL(14)
-#define PERF_CAP_PEBS_MASK     (PERF_CAP_PEBS_TRAP | PERF_CAP_ARCH_REG | \
-                                PERF_CAP_PEBS_FORMAT | PERF_CAP_PEBS_BASELINE)
+
+#define PERF_CAP_LBR_FMT               0x3f
+#define PERF_CAP_PEBS_TRAP             BIT_ULL(6)
+#define PERF_CAP_ARCH_REG              BIT_ULL(7)
+#define PERF_CAP_PEBS_FORMAT           0xf00
+#define PERF_CAP_FW_WRITES             BIT_ULL(13)
+#define PERF_CAP_PEBS_BASELINE         BIT_ULL(14)
+#define PERF_CAP_PEBS_MASK             (PERF_CAP_PEBS_TRAP | PERF_CAP_ARCH_REG | \
+                                        PERF_CAP_PEBS_FORMAT | PERF_CAP_PEBS_BASELINE)
 
 #define MSR_IA32_RTIT_CTL              0x00000570
 #define RTIT_CTL_TRACEEN               BIT(0)
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index fac2c80ddbab..013536fde10b 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -21,9 +21,6 @@ extern int __read_mostly pt_mode;
 #define PT_MODE_SYSTEM         0
 #define PT_MODE_HOST_GUEST     1
 
-#define PMU_CAP_FW_WRITES      (1ULL << 13)
-#define PMU_CAP_LBR_FMT                0x3f
-
 struct nested_vmx_msrs {
        /*
         * We only store the "true" versions of the VMX capability MSRs. We
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 425e93d4b1c6..fc017e9a6a0c 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -118,7 +118,7 @@ static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
 
 static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
 {
-       return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
+       return (vcpu_get_perf_capabilities(vcpu) & PERF_CAP_FW_WRITES) != 0;
 }
 
 static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
@@ -543,7 +543,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 
        perf_capabilities = vcpu_get_perf_capabilities(vcpu);
        if (cpuid_model_is_consistent(vcpu) &&
-           (perf_capabilities & PMU_CAP_LBR_FMT))
+           (perf_capabilities & PERF_CAP_LBR_FMT))
                memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
        else
                lbr_desc->records.nr = 0;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index ca1c53f855e0..9c4b3c2b1d65 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2188,7 +2188,7 @@ static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated
            (host_initiated || guest_cpu_cap_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
                debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
 
-       if ((kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT) &&
+       if ((kvm_caps.supported_perf_cap & PERF_CAP_LBR_FMT) &&
            (host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
                debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
 
@@ -2464,9 +2464,9 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        vmx->pt_desc.guest.addr_a[index / 2] = data;
                break;
        case MSR_IA32_PERF_CAPABILITIES:
-               if (data & PMU_CAP_LBR_FMT) {
-                       if ((data & PMU_CAP_LBR_FMT) !=
-                           (kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT))
+               if (data & PERF_CAP_LBR_FMT) {
+                       if ((data & PERF_CAP_LBR_FMT) !=
+                           (kvm_caps.supported_perf_cap & PERF_CAP_LBR_FMT))
                                return 1;
                        if (!cpuid_model_is_consistent(vcpu))
                                return 1;
@@ -7907,7 +7907,7 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 
 static __init u64 vmx_get_perf_capabilities(void)
 {
-       u64 perf_cap = PMU_CAP_FW_WRITES;
+       u64 perf_cap = PERF_CAP_FW_WRITES;
 
        if (!enable_pmu)
                return 0;
@@ -7924,7 +7924,7 @@ static __init u64 vmx_get_perf_capabilities(void)
                if (!vmx_lbr_caps.has_callstack)
                        memset(&vmx_lbr_caps, 0, sizeof(vmx_lbr_caps));
                else if (vmx_lbr_caps.nr)
-                       perf_cap |= kvm_host.perf_capabilities & PMU_CAP_LBR_FMT;
+                       perf_cap |= kvm_host.perf_capabilities & PERF_CAP_LBR_FMT;
        }
 
        if (vmx_pebs_supported()) {
-- 
2.49.0.395.g12beb8f557-goog

