2017-01-13 10:01-0200, Marcelo Tosatti:
> Expose the realtime host clock and save the TSC value
> used for the clock calculation.
> 
> Signed-off-by: Marcelo Tosatti <mtosa...@redhat.com>
> 
> ---
>  arch/x86/kvm/x86.c |   38 ++++++++++++++++++++++++++++++++++++++
>  1 file changed, 38 insertions(+)
> 
> Index: kvm-ptpdriver/arch/x86/kvm/x86.c
> ===================================================================
> --- kvm-ptpdriver.orig/arch/x86/kvm/x86.c     2017-01-13 08:59:03.015895353 -0200
> +++ kvm-ptpdriver/arch/x86/kvm/x86.c  2017-01-13 09:04:46.581415259 -0200
> @@ -1139,6 +1139,8 @@
>  
>       u64             boot_ns;
>       u64             nsec_base;
> +     u64             wall_time_sec;
> +     u64             wall_time_snsec;

The leading "s" in "snsec" looks like copy-paste residue.

>  };
>  
>  static struct pvclock_gtod_data pvclock_gtod_data;
> @@ -1162,6 +1164,9 @@
>       vdata->boot_ns                  = boot_ns;
>       vdata->nsec_base                = tk->tkr_mono.xtime_nsec;
>  
> +     vdata->wall_time_sec            = tk->xtime_sec;
> +     vdata->wall_time_snsec          = tk->tkr_mono.xtime_nsec;

Using the tk->tkr_mono offsets for real time seems wrong -- what
happens if real time is, say, half a second shifted from monotonic
time?

If it is ok, then vdata->wall_time_snsec is always equal to
vdata->nsec_base, so the new field is redundant.
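
That is, do_realtime() further down could then just read the existing
field (illustration only):

	ns = gtod->nsec_base;	/* instead of gtod->wall_time_snsec */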

> +
>       write_seqcount_end(&vdata->seq);
>  }
>  #endif
> @@ -1623,6 +1628,28 @@
>       return mode;
>  }
>  
> +static int do_realtime(struct timespec *ts, cycle_t *cycle_now)

This is too similar to do_monotonic_boot(), but I don't see a solution
that is both nice and efficient. :(

(The usual options are a macro or taking one copy of pvclock_gtod_data
under the seqcount; a rough sketch of the latter follows the quoted
function below.)

> +{
> +     struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
> +     unsigned long seq;
> +     int mode;
> +     u64 ns;
> +
> +     do {
> +             seq = read_seqcount_begin(&gtod->seq);
> +             mode = gtod->clock.vclock_mode;
> +             ts->tv_sec = gtod->wall_time_sec;
> +             ns = gtod->wall_time_snsec;
> +             ns += vgettsc(cycle_now);
> +             ns >>= gtod->clock.shift;
> +     } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
> +
> +     ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
> +     ts->tv_nsec = ns;
> +
> +     return mode;
> +}
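
To make the "copy pvclock_gtod_data" option concrete, something along
these lines (untested, all names made up by me) would keep a single
retry loop and let each caller add its own base, at the cost of a few
extra loads on the monotonic path:

struct gtod_snapshot {
	int	mode;
	u64	mono_ns;	/* boot-based CLOCK_MONOTONIC, in ns */
	u64	wall_sec;	/* CLOCK_REALTIME seconds */
	u64	wall_ns;	/* CLOCK_REALTIME sub-second ns */
};

static void gtod_read_snapshot(struct gtod_snapshot *snap,
			       cycle_t *cycle_now)
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	unsigned long seq;
	u64 delta;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		snap->mode = gtod->clock.vclock_mode;
		/* one TSC read shared by both clocks */
		delta = vgettsc(cycle_now);
		snap->mono_ns = ((gtod->nsec_base + delta) >>
				 gtod->clock.shift) + gtod->boot_ns;
		snap->wall_sec = gtod->wall_time_sec;
		snap->wall_ns = (gtod->wall_time_snsec + delta) >>
				gtod->clock.shift;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
}

do_realtime() and do_monotonic_boot() would then only pick the fields
they need, but I agree it is not obviously nicer than the duplication.
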
> +
>  /* returns true if host is using tsc clocksource */
>  static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
>  {
> @@ -1632,6 +1659,17 @@
>  
>       return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
>  }
> +
> +/* returns true if host is using tsc clocksource */
> +static bool kvm_get_walltime_and_clockread(struct timespec *ts,
> +                                        cycle_t *cycle_now)
> +{
> +     /* checked again under seqlock below */
> +     if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
> +             return false;
> +
> +     return do_realtime(ts, cycle_now) == VCLOCK_TSC;
> +}
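
For my own understanding (this is not part of the patch), I assume the
eventual consumer -- presumably the PTP driver this series is named
after -- would use it roughly like this, pairing the captured wall
clock time with the TSC value it was derived from:

	struct timespec host_ts;
	cycle_t host_tsc;

	/* hypothetical caller; only valid while the host clocksource is TSC */
	if (!kvm_get_walltime_and_clockread(&host_ts, &host_tsc))
		return -EOPNOTSUPP;

	/*
	 * host_ts is CLOCK_REALTIME at the instant the TSC read host_tsc,
	 * so the pair can be forwarded to the guest as one (time, tsc)
	 * sample.
	 */
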
>  #endif
>  
>  /*
> 
> 
