Hi,

I'm wondering whether the check for an available high-precision clock should
live in kern/mach_clock.c or in the machine-dependent directory. In this patch
it is in the machine-dependent directory, which preserves backward
compatibility: otherwise, a gnumach built with just '../configure' page-faults
at startup. The helpers in question, as added to i386/i386/apic.c, are:
```
+uint32_t
+hpclock_read_counter(void)
+{
+    if (hpet_addr != NULL)
+       return HPET32(HPET_COUNTER);
+    else
+       return 0;
+}
+
+uint32_t
+hpclock_get_counter_period_nsec(void)
+{
+    if (hpet_addr != NULL)
+       return hpet_period_nsec;
+    else
+       return 0;
+}
```
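
For context, the correction that kern/mach_clock.c applies with these helpers
(time_value64_add_hpc in the patch below) boils down to the arithmetic
sketched here. This is a standalone illustration rather than code from the
patch: the period, tick length, and sample counter values are made-up
assumptions.

```
#include <stdint.h>
#include <stdio.h>

/* Hypothetical values for illustration only: a 100 ns counter period and a
   100 Hz clock interrupt, i.e. tick = 10000 microseconds.  */
#define PERIOD_NSEC 100
#define TICK_USEC   10000

int
main(void)
{
    uint32_t last_hpc_read = 123000;   /* counter at the last clock interrupt */
    uint32_t now           = 123742;   /* counter read when the time is queried */

    /* Nanoseconds elapsed since the last clock interrupt; the unsigned
       subtraction also copes with the counter wrapping between the reads.  */
    int64_t ns = (int64_t)(uint32_t)(now - last_hpc_read) * PERIOD_NSEC;

    /* Clamp just below one tick, as the patch does, so an imprecise period
       can never push the result past the next clock interrupt.  */
    if (ns > (int64_t)TICK_USEC * 1000)
        ns = (int64_t)TICK_USEC * 1000 - 1;

    /* This amount is added on top of the mapped time/uptime value.  */
    printf("correction: %lld ns\n", (long long)ns);
    return 0;
}
```

When no HPET is present, both helpers return 0, so the computed correction is
0 ns and the mapped time is returned unchanged.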

Zhaoming
---
 i386/i386/apic.c        | 17 +++++++++++++++++
 i386/i386at/model_dep.c |  2 ++
 kern/mach_clock.c       | 36 +++++++++++++++++++++++++++++++++++-
 kern/mach_clock.h       |  4 ++++
 4 files changed, 58 insertions(+), 1 deletion(-)

diff --git a/i386/i386/apic.c b/i386/i386/apic.c
index 77d555b5..6cacd5fa 100644
--- a/i386/i386/apic.c
+++ b/i386/i386/apic.c
@@ -479,3 +479,20 @@ hpet_mdelay(uint32_t ms)
     hpet_udelay(ms * 1000);
 }
 
+uint32_t
+hpclock_read_counter(void)
+{
+    if (hpet_addr != NULL)
+       return HPET32(HPET_COUNTER);
+    else
+       return 0;
+}
+
+uint32_t
+hpclock_get_counter_period_nsec(void)
+{
+    if (hpet_addr != NULL)
+       return hpet_period_nsec;
+    else
+       return 0;
+}
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index 30449c37..42dadeb8 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -223,7 +223,9 @@ void machine_init(void)
         */
        gdt_descr_tmp.linear_base += apboot_addr;
        apboot_jmp_offset += apboot_addr;
+#endif
 
+#ifdef APIC
        /*
         * Initialize the HPET
         */
diff --git a/kern/mach_clock.c b/kern/mach_clock.c
index 5501b7b8..9bae3439 100644
--- a/kern/mach_clock.c
+++ b/kern/mach_clock.c
@@ -83,6 +83,15 @@ unsigned     tickadj = 500 / HZ;     /* can adjust 100 usecs per second */
 unsigned       bigadj = 1000000;       /* adjust 10*tickadj if adjustment
                                           > bigadj */
 
+/* A high-precision (hardware) clock is taken into account to increase the
+ * accuracy of the functions used for getting time (e.g. host_get_time64()).
+ * The counter of the clock is read once on every clock interrupt.  When any
+ * of the functions used for getting time is called, the counter is read again
+ * and the difference between these two reads is multiplied by the counter
+ * period and added to the value read from time or uptime to get a more
+ * accurate time reading.  */
+uint32_t       last_hpc_read = 0;
+
 /*
  *     This update protocol, with a check value, allows
  *             do {
@@ -128,7 +137,8 @@ MACRO_BEGIN                                                        \
                __sync_synchronize();                                   \
                (time)->nanoseconds = mtime->time_value.nanoseconds;    \
                __sync_synchronize();                                   \
-       } while ((time)->seconds != mtime->check_seconds64);    \
+       } while ((time)->seconds != mtime->check_seconds64);            \
+       time_value64_add_hpc(time);                                     \
 MACRO_END
 
 #define read_mapped_uptime(uptime)                                     \
@@ -139,6 +149,7 @@ MACRO_BEGIN                                                        \
                (uptime)->nanoseconds = mtime->uptime_value.nanoseconds;\
                __sync_synchronize();                                   \
        } while ((uptime)->seconds != mtime->check_upseconds64);        \
+       time_value64_add_hpc(uptime);                                   \
 MACRO_END
 
 def_simple_lock_irq_data(static,       timer_lock)     /* lock for ... */
@@ -292,6 +303,7 @@ void clock_interrupt(
                }
            }
        }
+       last_hpc_read = hpclock_read_counter();
 }
 
 /*
@@ -426,6 +438,28 @@ clock_boottime_update(const struct time_value64 *new_time)
        time_value64_add(&clock_boottime_offset, &delta);
 }
 
+/*
+ * Add the time elapsed since the last clock interrupt, in nanoseconds.
+ */
+static void
+time_value64_add_hpc(time_value64_t *value)
+{
+       uint32_t now = hpclock_read_counter();
+       /* Time since the last clock interrupt, in nanoseconds (computed in
+          64 bits to avoid overflow).  */
+       int64_t ns = (int64_t)(now - last_hpc_read) * hpclock_get_counter_period_nsec();
+
+       /* Limit ns to less than the clock interrupt period, in case an
+          imprecise computation would otherwise make ns exceed that
+          period.  */
+       if (ns > tick * 1000)
+           /* Pin ns just below the end of the clock interrupt period when
+              that happens.  */
+           ns = (tick * 1000) - 1;
+
+       time_value64_add_nanos(value, ns);
+}
+
+
 /*
  * Record a timestamp in STAMP.  Records values in the boot-time clock
  * frame.
diff --git a/kern/mach_clock.h b/kern/mach_clock.h
index d4f04f5e..e83b638c 100644
--- a/kern/mach_clock.h
+++ b/kern/mach_clock.h
@@ -110,4 +110,8 @@ extern boolean_t untimeout(timer_func_t *fcn, const void *param);
 extern int timeopen(dev_t dev, int flag, io_req_t ior);
 extern void timeclose(dev_t dev, int flag);
 
+/* For high-precision clocks.  */
+extern uint32_t hpclock_read_counter(void);
+extern uint32_t hpclock_get_counter_period_nsec(void);
+
 #endif /* _KERN_MACH_CLOCK_H_ */
-- 
2.47.2

