Applied, thanks!

Zhaoming Luo, le lun. 24 mars 2025 12:25:51 +0800, a écrit :
> Integrate HPET so host_get_time, host_get_time64, and host_get_uptime64
> are more precise. The highest precision can be 10ns when this patch is
> applied.
> 
> * i386/i386/apic.c: Implement the two high-precision clock interface functions
>   added in this patch for i386.
> * i386/i386at/model_dep.c: Initialize HPET if APIC is defined
> * kern/mach_clock.c: Integrate the high-precision clocks to have the
>   10ns precise time values.
> * kern/mach_clock.h: Add two new interface functions for accessing the
>   high-precision clocks.
> 
> ---
>  i386/i386/apic.c        | 18 ++++++++++++++++++
>  i386/i386at/model_dep.c |  2 ++
>  kern/mach_clock.c       | 34 +++++++++++++++++++++++++++++++++-
>  kern/mach_clock.h       |  4 ++++
>  4 files changed, 57 insertions(+), 1 deletion(-)
> 
> diff --git a/i386/i386/apic.c b/i386/i386/apic.c
> index 77d555b5..3852b9aa 100644
> --- a/i386/i386/apic.c
> +++ b/i386/i386/apic.c
> @@ -479,3 +479,21 @@ hpet_mdelay(uint32_t ms)
>      hpet_udelay(ms * 1000);
>  }
>  
> +/* This function is called in clock_interrupt(), so it's possible to be 
> called
> +   when HPET is not available.  */
> +uint32_t
> +hpclock_read_counter(void)
> +{
> +    /* We assume the APIC machines have HPET.  */
> +#ifdef APIC
> +    return HPET32(HPET_COUNTER);
> +#else
> +    return 0;
> +#endif
> +}
> +
> +uint32_t
> +hpclock_get_counter_period_nsec(void)
> +{
> +    return hpet_period_nsec;
> +}
> diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
> index 30449c37..42dadeb8 100644
> --- a/i386/i386at/model_dep.c
> +++ b/i386/i386at/model_dep.c
> @@ -223,7 +223,9 @@ void machine_init(void)
>        */
>       gdt_descr_tmp.linear_base += apboot_addr;
>       apboot_jmp_offset += apboot_addr;
> +#endif
>  
> +#ifdef APIC
>       /*
>        * Initialize the HPET
>        */
> diff --git a/kern/mach_clock.c b/kern/mach_clock.c
> index 5501b7b8..48f673a4 100644
> --- a/kern/mach_clock.c
> +++ b/kern/mach_clock.c
> @@ -83,6 +83,15 @@ unsigned   tickadj = 500 / HZ;     /* can adjust 100 usecs 
> per second */
>  unsigned     bigadj = 1000000;       /* adjust 10*tickadj if adjustment
>                                          > bigadj */
>  
> +/* A high-precision (hardware) clock is taken into account to increase the
> + * accuracy of the functions used for getting time (e.g. host_get_time64()).
> + * The counter of the clock is read once in every clock interrupt.  When any
> + * of the functions used for getting time is called, the counter is read 
> again
> + * and the difference between these two reads is multiplied by the counter
> + * period and added to the read value from time or uptime to get a more
> + * accurate time read.  */
> +uint32_t     last_hpc_read = 0;
> +
>  /*
>   *   This update protocol, with a check value, allows
>   *           do {
> @@ -128,7 +137,8 @@ MACRO_BEGIN                                               
>                 \
>               __sync_synchronize();                                   \
>               (time)->nanoseconds = mtime->time_value.nanoseconds;    \
>               __sync_synchronize();                                   \
> -     } while ((time)->seconds != mtime->check_seconds64);    \
> +     } while ((time)->seconds != mtime->check_seconds64);            \
> +     time_value64_add_hpc(time);                                     \
>  MACRO_END
>  
>  #define read_mapped_uptime(uptime)                                   \
> @@ -139,6 +149,7 @@ MACRO_BEGIN                                               
>                 \
>               (uptime)->nanoseconds = mtime->uptime_value.nanoseconds;\
>               __sync_synchronize();                                   \
>       } while ((uptime)->seconds != mtime->check_upseconds64);        \
> +     time_value64_add_hpc(uptime);                                   \
>  MACRO_END
>  
>  def_simple_lock_irq_data(static,     timer_lock)     /* lock for ... */
> @@ -292,6 +303,7 @@ void clock_interrupt(
>               }
>           }
>       }
> +     last_hpc_read = hpclock_read_counter();
>  }
>  
>  /*
> @@ -426,6 +438,26 @@ clock_boottime_update(const struct time_value64 
> *new_time)
>       time_value64_add(&clock_boottime_offset, &delta);
>  }
>  
> +/*
> + * Add the time value since last clock interrupt in nanoseconds.
> + */
> +static void
> +time_value64_add_hpc(time_value64_t *value)
> +{
> +     uint32_t now = hpclock_read_counter();
> +     /* Time since last clock interrupt in nanoseconds.  */
> +     int64_t ns = (now - last_hpc_read) * hpclock_get_counter_period_nsec();
> +
> +     /* Limit the value of ns under the period of a clock interrupt.  */
> +     if (ns >= tick * 1000)
> +         /* Let ns stuck at the end of the clock interrupt period when
> +            something bad happens.  */
> +         ns = (tick * 1000) - 1;
> +
> +     time_value64_add_nanos(value, ns);
> +}
> +
> +
>  /*
>   * Record a timestamp in STAMP.  Records values in the boot-time clock
>   * frame.
> diff --git a/kern/mach_clock.h b/kern/mach_clock.h
> index d4f04f5e..e83b638c 100644
> --- a/kern/mach_clock.h
> +++ b/kern/mach_clock.h
> @@ -110,4 +110,8 @@ extern boolean_t untimeout(timer_func_t *fcn, const void 
> *param);
>  extern int timeopen(dev_t dev, int flag, io_req_t ior);
>  extern void timeclose(dev_t dev, int flag);
>  
> +/* For high-precision clocks.  */
> +extern uint32_t hpclock_read_counter(void);
> +extern uint32_t hpclock_get_counter_period_nsec(void);
> +
>  #endif /* _KERN_MACH_CLOCK_H_ */
> -- 
> 2.47.2
> 
> 

-- 
Samuel
> No manual is ever necessary.
May I politely interject here: BULLSHIT.  That's the biggest Apple lie of all!
(Discussion in comp.os.linux.misc on the intuitiveness of interfaces.)

Reply via email to