On Mon, Mar 24, 2025, Mingwei Zhang wrote:
> From: Sandipan Das <sandipan....@amd.com>
> 
> Add all PMU-related MSRs (including legacy K7 MSRs) to the list of
> possible direct access MSRs.  Most of them will not be intercepted when
> using the passthrough PMU.
> 
> Signed-off-by: Sandipan Das <sandipan....@amd.com>
> Signed-off-by: Mingwei Zhang <mizh...@google.com>
> ---
>  arch/x86/kvm/svm/svm.c | 24 ++++++++++++++++++++++++
>  arch/x86/kvm/svm/svm.h |  2 +-
>  2 files changed, 25 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index a713c803a3a3..bff351992468 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -143,6 +143,30 @@ static const struct svm_direct_access_msrs {
>       { .index = X2APIC_MSR(APIC_TMICT),              .always = false },
>       { .index = X2APIC_MSR(APIC_TMCCT),              .always = false },
>       { .index = X2APIC_MSR(APIC_TDCR),               .always = false },
> +     { .index = MSR_K7_EVNTSEL0,                     .always = false },

These MSRs are always intercepted, i.e. they don't belong in this list.

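For context on why (from my reading of svm.c, not this patch): every MSR is
intercepted by default, and the only thing an svm_direct_access_msrs entry
buys is a tracked MSRPM offset so the intercept can later be cleared, e.g.
with the existing helper:

        /*
         * Sketch only: read = write = 1 means "don't intercept", i.e. the
         * guest gets direct access to the MSR.
         */
        set_msr_interception(vcpu, svm->msrpm, MSR_F15H_PERF_CTL0, 1, 1);

KVM emulates the legacy K7 aliases and so must keep intercepting them;
nothing will ever make the equivalent call for those, and listing them just
burns slots and MSRPM offsets.
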
> +     { .index = MSR_K7_PERFCTR0,                     .always = false },
> +     { .index = MSR_K7_EVNTSEL1,                     .always = false },
> +     { .index = MSR_K7_PERFCTR1,                     .always = false },
> +     { .index = MSR_K7_EVNTSEL2,                     .always = false },
> +     { .index = MSR_K7_PERFCTR2,                     .always = false },
> +     { .index = MSR_K7_EVNTSEL3,                     .always = false },
> +     { .index = MSR_K7_PERFCTR3,                     .always = false },
> +     { .index = MSR_F15H_PERF_CTL0,                  .always = false },
> +     { .index = MSR_F15H_PERF_CTR0,                  .always = false },
> +     { .index = MSR_F15H_PERF_CTL1,                  .always = false },
> +     { .index = MSR_F15H_PERF_CTR1,                  .always = false },
> +     { .index = MSR_F15H_PERF_CTL2,                  .always = false },
> +     { .index = MSR_F15H_PERF_CTR2,                  .always = false },
> +     { .index = MSR_F15H_PERF_CTL3,                  .always = false },
> +     { .index = MSR_F15H_PERF_CTR3,                  .always = false },
> +     { .index = MSR_F15H_PERF_CTL4,                  .always = false },
> +     { .index = MSR_F15H_PERF_CTR4,                  .always = false },
> +     { .index = MSR_F15H_PERF_CTL5,                  .always = false },
> +     { .index = MSR_F15H_PERF_CTR5,                  .always = false },
> +     { .index = MSR_AMD64_PERF_CNTR_GLOBAL_CTL,      .always = false },
> +     { .index = MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,   .always = false },
> +     { .index = MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,       .always = false },
> +     { .index = MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET,       .always = false },
>       { .index = MSR_INVALID,                         .always = false },
>  };

As with the Intel patch, this absolutely belongs in the patch that supports
disabling intercepts.

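I.e. squash the list additions into that patch, so the series never carries
dead entries that nothing clears.  Roughly the shape I'd expect the combined
change to have (sketch only; the function name and the PMU predicate are
hypothetical, the helper and the CTL/CTR MSR layout are existing code):

        static void svm_set_pmu_msr_intercepts(struct kvm_vcpu *vcpu)
        {
                struct vcpu_svm *svm = to_svm(vcpu);
                /* Hypothetical predicate for "mediated/passthrough PMU on". */
                int pt = is_passthrough_pmu_enabled(vcpu);
                int i;

                /* CTL/CTR pairs interleave: CTLn = CTL0 + 2n, CTRn = CTR0 + 2n. */
                for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
                        set_msr_interception(vcpu, svm->msrpm,
                                             MSR_F15H_PERF_CTL0 + 2 * i, pt, pt);
                        set_msr_interception(vcpu, svm->msrpm,
                                             MSR_F15H_PERF_CTR0 + 2 * i, pt, pt);
                }
                set_msr_interception(vcpu, svm->msrpm,
                                     MSR_AMD64_PERF_CNTR_GLOBAL_CTL, pt, pt);
                set_msr_interception(vcpu, svm->msrpm,
                                     MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, pt, pt);
                ...
        }
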
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 9d7cdb8fbf87..ae71bf5f12d0 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -44,7 +44,7 @@ static inline struct page *__sme_pa_to_page(unsigned long pa)
>  #define      IOPM_SIZE PAGE_SIZE * 3
>  #define      MSRPM_SIZE PAGE_SIZE * 2
>  
> -#define MAX_DIRECT_ACCESS_MSRS       48
> +#define MAX_DIRECT_ACCESS_MSRS       72
>  #define MSRPM_OFFSETS        32
>  extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
>  extern bool npt_enabled;
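
Sanity check on the bump: the list gains 8 K7 + 12 F15H + 4 PERF_CNTR_GLOBAL
entries = 24 new slots, and 48 + 24 = 72, so the arithmetic is right.  But if
the K7 aliases are dropped per the above, this wants to be 64.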
> -- 
> 2.49.0.395.g12beb8f557-goog
> 
