On 13.01.2022 14:17, Jan Beulich wrote:
> Except in the "clocksource=tsc" case we can replace the indirect calls
> involved in accessing the platform timers by direct ones, as they get
> established once and never changed. To also cover the "tsc" case, invoke
> what read_tsc() resolves to directly. In turn read_tsc() then becomes
> unreachable and hence can move to .init.*.
> 
> Signed-off-by: Jan Beulich <jbeul...@suse.com>

As this actually amends the IBT work, I would have hoped it would be
viewed as useful. Of course, if accepted in general, it would now want
the __initconst_cf_clobber annotation added. There is a slight
complication, though: some of the structures are written to, so those
couldn't really be "const".
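
For illustration only (nothing below is part of the patch, and plt_foo,
read_foo_count, and init_foo are made-up names): an instance that is
never written to could be annotated roughly like this, using just the
fields visible in the hunk below; the written-to ones would instead
need some non-const equivalent of the annotation.

    static struct platform_timesource __initconst_cf_clobber plt_foo =
    {
        .id = "foo",
        .name = "illustrative example timer",
        .frequency = 14318180,            /* fixed rate, hence never written */
        .read_counter = read_foo_count,   /* made-up hooks */
        .init = init_foo,
    };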

Jan

> ---
> v2: Avoid altcall patching becoming conditional.
> 
> --- a/xen/arch/x86/time.c
> +++ b/xen/arch/x86/time.c
> @@ -66,6 +66,7 @@ struct platform_timesource {
>      char *id;
>      char *name;
>      u64 frequency;
> +    /* This hook may only be invoked via the read_counter() wrapper! */
>      u64 (*read_counter)(void);
>      s64 (*init)(struct platform_timesource *);
>      void (*resume)(struct platform_timesource *);
> @@ -578,7 +579,7 @@ static s64 __init init_tsc(struct platfo
>      return ret;
>  }
>  
> -static u64 read_tsc(void)
> +static uint64_t __init read_tsc(void)
>  {
>      return rdtsc_ordered();
>  }
> @@ -810,6 +811,18 @@ static s_time_t __read_platform_stime(u6
>      return (stime_platform_stamp + scale_delta(diff, &plt_scale));
>  }
>  
> +static uint64_t read_counter(void)
> +{
> +    /*
> +     * plt_tsc is put in use only after alternatives patching has occurred,
> +     * hence we can't invoke read_tsc() that way. Special case it here, open-
> +     * coding the function call at the same time.
> +     */
> +    return plt_src.read_counter != read_tsc
> +           ? alternative_call(plt_src.read_counter)
> +           : rdtsc_ordered();
> +}
> +
>  static void plt_overflow(void *unused)
>  {
>      int i;
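
(Side note for readers of the archive, not part of the patch: the
special casing is needed because alternative_call() sites are converted
from indirect to direct calls exactly once, during boot-time
alternatives patching, while plt_tsc is put in use only afterwards. A
patched call site therefore can never target read_tsc(), which is why
the TSC path is open-coded and read_tsc() itself can move to .init.*.
A toy model of just the dispatch, with all names made up and no attempt
to model the patching itself:

    #include <stdint.h>

    static uint64_t toy_rdtsc(void)     { return 0; /* rdtsc_ordered() stand-in */ }
    static uint64_t toy_read_plat(void) { return 1; /* platform hook stand-in */ }

    /* Analogue of plt_src.read_counter, established once during boot. */
    static uint64_t (*toy_hook)(void) = toy_read_plat;

    static uint64_t toy_read_counter(void)
    {
        /* As in the wrapper above: all sources except the TSC dispatch
         * through the hook (in Xen, via a patched direct call); the TSC
         * path is invoked directly. */
        return toy_hook != toy_rdtsc ? toy_hook() : toy_rdtsc();
    }
)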
> @@ -818,7 +831,7 @@ static void plt_overflow(void *unused)
>  
>      spin_lock_irq(&platform_timer_lock);
>  
> -    count = plt_src.read_counter();
> +    count = read_counter();
>      plt_stamp64 += (count - plt_stamp) & plt_mask;
>      plt_stamp = count;
>  
> @@ -854,7 +867,7 @@ static s_time_t read_platform_stime(u64
>      ASSERT(!local_irq_is_enabled());
>  
>      spin_lock(&platform_timer_lock);
> -    plt_counter = plt_src.read_counter();
> +    plt_counter = read_counter();
>      count = plt_stamp64 + ((plt_counter - plt_stamp) & plt_mask);
>      stime = __read_platform_stime(count);
>      spin_unlock(&platform_timer_lock);
> @@ -872,7 +885,7 @@ static void platform_time_calibration(vo
>      unsigned long flags;
>  
>      spin_lock_irqsave(&platform_timer_lock, flags);
> -    count = plt_stamp64 + ((plt_src.read_counter() - plt_stamp) & plt_mask);
> +    count = plt_stamp64 + ((read_counter() - plt_stamp) & plt_mask);
>      stamp = __read_platform_stime(count);
>      stime_platform_stamp = stamp;
>      platform_timer_stamp = count;
> @@ -883,10 +896,10 @@ static void resume_platform_timer(void)
>  {
>      /* Timer source can be reset when backing from S3 to S0 */
>      if ( plt_src.resume )
> -        plt_src.resume(&plt_src);
> +        alternative_vcall(plt_src.resume, &plt_src);
>  
>      plt_stamp64 = platform_timer_stamp;
> -    plt_stamp = plt_src.read_counter();
> +    plt_stamp = read_counter();
>  }
>  
>  static void __init reset_platform_timer(void)
> 
> 

