Author: jkim
Date: Tue Apr 12 23:04:01 2011
New Revision: 220583
URL: http://svn.freebsd.org/changeset/base/220583

Log:
  Reinstate cpu_est_clockrate() support for P-state invariant TSC if the APERF
  and MPERF MSRs are available.  It was disabled in r216443.  Remove the
  earlier hack of subtracting 0.5% from the calibrated frequency, as DELAY(9)
  is a little bit more reliable now.
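
  For context, the reinstated path scales the measured TSC delta by the
  APERF/MPERF ratio: on a P-state invariant CPU the TSC and MPERF both tick at
  the nominal (reference) rate while APERF counts cycles actually delivered,
  so the effective clock is roughly nominal * APERF / MPERF.  Below is a
  minimal user-space sketch of that arithmetic; the counter values are made up
  for illustration, whereas the kernel samples them with rdtsc() and
  rdmsr(MSR_APERF)/rdmsr(MSR_MPERF) around DELAY(1000).

  #include <stdint.h>
  #include <stdio.h>

  int
  main(void)
  {
          /* Hypothetical samples taken around a 1000 us DELAY() window. */
          uint64_t tsc1 = 0, tsc2 = 2600000;  /* invariant TSC ticks */
          uint64_t acnt = 1200000;    /* APERF: cycles actually delivered */
          uint64_t mcnt = 2600000;    /* MPERF: cycles at the reference rate */
          uint64_t rate;

          /* Ticks per microsecond, scaled by APERF/MPERF, converted to Hz. */
          rate = (tsc2 - tsc1) / 1000 * acnt / mcnt * 1000000;
          printf("estimated clock rate: %ju Hz\n", (uintmax_t)rate);
          return (0);
  }

  With the numbers above this prints 1200000000 Hz, i.e. a core running at
  1.2 GHz against a 2.6 GHz reference TSC.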

Modified:
  head/sys/amd64/amd64/machdep.c
  head/sys/i386/i386/machdep.c
  head/sys/pc98/pc98/machdep.c

Modified: head/sys/amd64/amd64/machdep.c
==============================================================================
--- head/sys/amd64/amd64/machdep.c      Tue Apr 12 22:48:03 2011        (r220582)
+++ head/sys/amd64/amd64/machdep.c      Tue Apr 12 23:04:01 2011        (r220583)
@@ -545,23 +545,20 @@ cpu_flush_dcache(void *ptr, size_t len)
 int
 cpu_est_clockrate(int cpu_id, uint64_t *rate)
 {
+       uint64_t tsc1, tsc2;
+       uint64_t acnt, mcnt;
        register_t reg;
-       uint64_t freq, tsc1, tsc2;
 
        if (pcpu_find(cpu_id) == NULL || rate == NULL)
                return (EINVAL);
-       freq = atomic_load_acq_64(&tsc_freq);
 
-       /* If TSC is P-state invariant, DELAY(9) based logic fails. */
-       if (tsc_is_invariant && freq != 0)
+       /*
+        * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
+        * DELAY(9) based logic fails.
+        */
+       if (tsc_is_invariant && !tsc_perf_stat)
                return (EOPNOTSUPP);
 
-       /* If we're booting, trust the rate calibrated moments ago. */
-       if (cold && freq != 0) {
-               *rate = freq;
-               return (0);
-       }
-
 #ifdef SMP
        if (smp_cpus > 1) {
                /* Schedule ourselves on the indicated cpu. */
@@ -573,10 +570,23 @@ cpu_est_clockrate(int cpu_id, uint64_t *
 
        /* Calibrate by measuring a short delay. */
        reg = intr_disable();
-       tsc1 = rdtsc();
-       DELAY(1000);
-       tsc2 = rdtsc();
-       intr_restore(reg);
+       if (tsc_is_invariant) {
+               wrmsr(MSR_MPERF, 0);
+               wrmsr(MSR_APERF, 0);
+               tsc1 = rdtsc();
+               DELAY(1000);
+               mcnt = rdmsr(MSR_MPERF);
+               acnt = rdmsr(MSR_APERF);
+               tsc2 = rdtsc();
+               intr_restore(reg);
+               *rate = (tsc2 - tsc1) / 1000 * acnt / mcnt * 1000000;
+       } else {
+               tsc1 = rdtsc();
+               DELAY(1000);
+               tsc2 = rdtsc();
+               intr_restore(reg);
+               *rate = (tsc2 - tsc1) * 1000;
+       }
 
 #ifdef SMP
        if (smp_cpus > 1) {
@@ -586,17 +596,6 @@ cpu_est_clockrate(int cpu_id, uint64_t *
        }
 #endif
 
-       tsc2 -= tsc1;
-       if (freq != 0) {
-               *rate = tsc2 * 1000;
-               return (0);
-       }
-
-       /*
-        * Subtract 0.5% of the total.  Empirical testing has shown that
-        * overhead in DELAY() works out to approximately this value.
-        */
-       *rate = tsc2 * 1000 - tsc2 * 5;
        return (0);
 }
 

Modified: head/sys/i386/i386/machdep.c
==============================================================================
--- head/sys/i386/i386/machdep.c        Tue Apr 12 22:48:03 2011        (r220582)
+++ head/sys/i386/i386/machdep.c        Tue Apr 12 23:04:01 2011        (r220583)
@@ -1136,25 +1136,22 @@ cpu_flush_dcache(void *ptr, size_t len)
 int
 cpu_est_clockrate(int cpu_id, uint64_t *rate)
 {
+       uint64_t tsc1, tsc2;
+       uint64_t acnt, mcnt;
        register_t reg;
-       uint64_t freq, tsc1, tsc2;
 
        if (pcpu_find(cpu_id) == NULL || rate == NULL)
                return (EINVAL);
        if ((cpu_feature & CPUID_TSC) == 0)
                return (EOPNOTSUPP);
-       freq = atomic_load_acq_64(&tsc_freq);
 
-       /* If TSC is P-state invariant, DELAY(9) based logic fails. */
-       if (tsc_is_invariant && freq != 0)
+       /*
+        * If TSC is P-state invariant and APERF/MPERF MSRs do not exist,
+        * DELAY(9) based logic fails.
+        */
+       if (tsc_is_invariant && !tsc_perf_stat)
                return (EOPNOTSUPP);
 
-       /* If we're booting, trust the rate calibrated moments ago. */
-       if (cold && freq != 0) {
-               *rate = freq;
-               return (0);
-       }
-
 #ifdef SMP
        if (smp_cpus > 1) {
                /* Schedule ourselves on the indicated cpu. */
@@ -1166,10 +1163,23 @@ cpu_est_clockrate(int cpu_id, uint64_t *
 
        /* Calibrate by measuring a short delay. */
        reg = intr_disable();
-       tsc1 = rdtsc();
-       DELAY(1000);
-       tsc2 = rdtsc();
-       intr_restore(reg);
+       if (tsc_is_invariant) {
+               wrmsr(MSR_MPERF, 0);
+               wrmsr(MSR_APERF, 0);
+               tsc1 = rdtsc();
+               DELAY(1000);
+               mcnt = rdmsr(MSR_MPERF);
+               acnt = rdmsr(MSR_APERF);
+               tsc2 = rdtsc();
+               intr_restore(reg);
+               *rate = (tsc2 - tsc1) / 1000 * acnt / mcnt * 1000000;
+       } else {
+               tsc1 = rdtsc();
+               DELAY(1000);
+               tsc2 = rdtsc();
+               intr_restore(reg);
+               *rate = (tsc2 - tsc1) * 1000;
+       }
 
 #ifdef SMP
        if (smp_cpus > 1) {
@@ -1179,17 +1189,6 @@ cpu_est_clockrate(int cpu_id, uint64_t *
        }
 #endif
 
-       tsc2 -= tsc1;
-       if (freq != 0) {
-               *rate = tsc2 * 1000;
-               return (0);
-       }
-
-       /*
-        * Subtract 0.5% of the total.  Empirical testing has shown that
-        * overhead in DELAY() works out to approximately this value.
-        */
-       *rate = tsc2 * 1000 - tsc2 * 5;
        return (0);
 }
 

Modified: head/sys/pc98/pc98/machdep.c
==============================================================================
--- head/sys/pc98/pc98/machdep.c        Tue Apr 12 22:48:03 2011        (r220582)
+++ head/sys/pc98/pc98/machdep.c        Tue Apr 12 23:04:01 2011        (r220583)
@@ -1071,20 +1071,13 @@ cpu_flush_dcache(void *ptr, size_t len)
 int
 cpu_est_clockrate(int cpu_id, uint64_t *rate)
 {
+       uint64_t tsc1, tsc2;
        register_t reg;
-       uint64_t freq, tsc1, tsc2;
 
        if (pcpu_find(cpu_id) == NULL || rate == NULL)
                return (EINVAL);
        if ((cpu_feature & CPUID_TSC) == 0)
                return (EOPNOTSUPP);
-       freq = atomic_load_acq_64(&tsc_freq);
-
-       /* If we're booting, trust the rate calibrated moments ago. */
-       if (cold && freq != 0) {
-               *rate = freq;
-               return (0);
-       }
 
 #ifdef SMP
        if (smp_cpus > 1) {
@@ -1101,6 +1094,7 @@ cpu_est_clockrate(int cpu_id, uint64_t *
        DELAY(1000);
        tsc2 = rdtsc();
        intr_restore(reg);
+       *rate = (tsc2 - tsc1) * 1000;
 
 #ifdef SMP
        if (smp_cpus > 1) {
@@ -1110,17 +1104,6 @@ cpu_est_clockrate(int cpu_id, uint64_t *
        }
 #endif
 
-       tsc2 -= tsc1;
-       if (freq != 0) {
-               *rate = tsc2 * 1000;
-               return (0);
-       }
-
-       /*
-        * Subtract 0.5% of the total.  Empirical testing has shown that
-        * overhead in DELAY() works out to approximately this value.
-        */
-       *rate = tsc2 * 1000 - tsc2 * 5;
        return (0);
 }
 
