On Mon, Mar 24, 2025 at 05:30:49PM +0000, Mingwei Zhang wrote:

> @@ -1822,7 +1835,7 @@ extern int perf_event_period(struct perf_event *event, u64 value);
>  extern u64 perf_event_pause(struct perf_event *event, bool reset);
>  int perf_get_mediated_pmu(void);
>  void perf_put_mediated_pmu(void);
> -void perf_guest_enter(void);
> +void perf_guest_enter(u32 guest_lvtpc);
>  void perf_guest_exit(void);
>  #else /* !CONFIG_PERF_EVENTS: */
>  static inline void *
> @@ -1921,7 +1934,7 @@ static inline int perf_get_mediated_pmu(void)
>  }
>  
>  static inline void perf_put_mediated_pmu(void)                       { }
> -static inline void perf_guest_enter(void)                    { }
> +static inline void perf_guest_enter(u32 guest_lvtpc)         { }
>  static inline void perf_guest_exit(void)                     { }
>  #endif
>  
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index d05487d465c9..406b86641f02 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -451,6 +451,7 @@ static inline bool is_include_guest_event(struct perf_event *event)
>  static LIST_HEAD(pmus);
>  static DEFINE_MUTEX(pmus_lock);
>  static struct srcu_struct pmus_srcu;
> +static DEFINE_PER_CPU(struct mediated_pmus_list, mediated_pmus);
>  static cpumask_var_t perf_online_mask;
>  static cpumask_var_t perf_online_core_mask;
>  static cpumask_var_t perf_online_die_mask;
> @@ -6053,8 +6054,26 @@ static inline void perf_host_exit(struct perf_cpu_context *cpuctx)
>       }
>  }
>  
> +static void perf_switch_guest_ctx(bool enter, u32 guest_lvtpc)
> +{
> +     struct mediated_pmus_list *pmus = this_cpu_ptr(&mediated_pmus);
> +     struct perf_cpu_pmu_context *cpc;
> +     struct pmu *pmu;
> +
> +     lockdep_assert_irqs_disabled();
> +
> +     rcu_read_lock();
> +     list_for_each_entry_rcu(cpc, &pmus->list, mediated_entry) {
> +             pmu = cpc->epc.pmu;
> +
> +             if (pmu->switch_guest_ctx)
> +                     pmu->switch_guest_ctx(enter, (void *)&guest_lvtpc);
> +     }
> +     rcu_read_unlock();
> +}
> +
>  /* When entering a guest, schedule out all exclude_guest events. */
> -void perf_guest_enter(void)
> +void perf_guest_enter(u32 guest_lvtpc)
>  {
>       struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
>  
> @@ -6067,6 +6086,8 @@ void perf_guest_enter(void)
>  
>       perf_host_exit(cpuctx);
>  
> +     perf_switch_guest_ctx(true, guest_lvtpc);
> +
>       __this_cpu_write(perf_in_guest, true);
>  
>  unlock:

This, I'm still utterly hating on that lvtpc argument. That doesn't
belong here. Make it go away.
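
For illustration only, a hedged sketch of one way the argument could be dropped
from the generic API: keep perf_guest_enter(void) taking no argument and let KVM
stage the guest LVTPC value on the x86 side before entry, so the core code never
sees it. The perf_guest_set_lvtpc() helper and the per-CPU variable below are
hypothetical names invented for this sketch, not something from the posted
series or a suggestion the reviewer made:

	/*
	 * Hypothetical sketch, not the posted patch: the generic entry hook
	 * stays argument-free and the LVTPC plumbing lives in x86 code.
	 */

	/* include/linux/perf_event.h */
	void perf_guest_enter(void);

	/* arch/x86/events/core.c (hypothetical helper, called by KVM) */
	static DEFINE_PER_CPU(u32, guest_lvtpc);

	void perf_guest_set_lvtpc(u32 lvtpc)
	{
		this_cpu_write(guest_lvtpc, lvtpc);
	}
	EXPORT_SYMBOL_GPL(perf_guest_set_lvtpc);

	/*
	 * The pmu->switch_guest_ctx() implementation then reads the staged
	 * value itself instead of having core code pass it through.
	 */
	static void x86_pmu_switch_guest_ctx(bool enter, void *data)
	{
		if (enter)
			apic_write(APIC_LVTPC, this_cpu_read(guest_lvtpc));
		else
			apic_write(APIC_LVTPC, APIC_DM_NMI);
	}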

