get_cyclecount(9) and cpu_ticks() now do almost exactly the same thing, assuming set_cputicker() is called with a correct function before get_cyclecount() is used, which is true at least on x86. The only difference is that get_cyclecount() may be inlined, but I don't see much gain from that in the current uses.
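For readers who haven't looked at the cputicker machinery, the equivalence rests on cpu_ticks() being an indirect call through a pointer that set_cputicker() installs early in boot (a TSC read on x86, a timecounter-derived fallback elsewhere). Here is a minimal sketch of that indirection, written as standalone C with hypothetical example_* names rather than the actual kern_tc.c code:

#include <stdint.h>

typedef uint64_t (cpu_tick_f)(void);

/* Hypothetical stand-in for the timecounter-based fallback ticker. */
static uint64_t
example_fallback_ticks(void)
{

	return (0);	/* a real ticker would read a hardware counter here */
}

/* cpu_ticks() amounts to a call through a pointer like this one. */
static cpu_tick_f *example_cpu_ticks = example_fallback_ticks;

/* set_cputicker() repoints it at a better counter once one is known. */
static void
example_set_cputicker(cpu_tick_f *func)
{

	example_cpu_ticks = func;
}

With the patch below, get_cyclecount() becomes the same indirect call, which is why the two are interchangeable once a ticker has been registered.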
Please review the patch. Note that I didn't actually remove get_cyclecount(), because some random third-party module may still use it: it is a documented feature, while cpu_ticks() is an internal KPI. What do you think?

Jung-uk Kim
Index: sys/kern/kern_ktr.c
===================================================================
--- sys/kern/kern_ktr.c	(revision 219714)
+++ sys/kern/kern_ktr.c	(working copy)
@@ -73,7 +73,7 @@ __FBSDID("$FreeBSD$");
 #endif
 
 #ifndef KTR_TIME
-#define	KTR_TIME	get_cyclecount()
+#define	KTR_TIME	cpu_ticks()
 #endif
 
 #ifndef KTR_CPU
Index: sys/kern/init_main.c
===================================================================
--- sys/kern/init_main.c	(revision 219714)
+++ sys/kern/init_main.c	(working copy)
@@ -560,7 +560,7 @@ SYSINIT(p0init, SI_SUB_INTRINSIC, SI_ORDER_FIRST,
 static void
 proc0_post(void *dummy __unused)
 {
-	struct timespec ts;
+	struct bintime bt;
 	struct proc *p;
 	struct rusage ru;
 	struct thread *td;
@@ -590,8 +590,8 @@ proc0_post(void *dummy __unused)
 	/*
 	 * Give the ``random'' number generator a thump.
 	 */
-	nanotime(&ts);
-	srandom(ts.tv_sec ^ ts.tv_nsec);
+	bintime(&bt);
+	srandom(bt.sec ^ bt.frac);
 }
 SYSINIT(p0post, SI_SUB_INTRINSIC_POST, SI_ORDER_FIRST, proc0_post, NULL);
 
@@ -601,10 +601,10 @@ random_init(void *dummy __unused)
 {
 
 	/*
 	 * After CPU has been started we have some randomness on most
-	 * platforms via get_cyclecount().  For platforms that don't
-	 * we will reseed random(9) in proc0_post() as well.
+	 * platforms via cpu_ticks().  For platforms that don't we will
+	 * reseed random(9) in proc0_post() as well.
 	 */
-	srandom(get_cyclecount());
+	srandom(cpu_ticks());
 }
 SYSINIT(random, SI_SUB_RANDOM, SI_ORDER_FIRST, random_init, NULL);
Index: sys/netinet/sctp_os_bsd.h
===================================================================
--- sys/netinet/sctp_os_bsd.h	(revision 219714)
+++ sys/netinet/sctp_os_bsd.h	(working copy)
@@ -129,7 +129,7 @@ MALLOC_DECLARE(SCTP_M_MCORE);
 
 #if defined(SCTP_LOCAL_TRACE_BUF)
 
-#define	SCTP_GET_CYCLECOUNT get_cyclecount()
+#define	SCTP_GET_CYCLECOUNT cpu_ticks()
 #define	SCTP_CTR6 sctp_log_trace
 
 #else
Index: sys/dev/de/if_devar.h
===================================================================
--- sys/dev/de/if_devar.h	(revision 219714)
+++ sys/dev/de/if_devar.h	(working copy)
@@ -903,7 +903,7 @@ typedef u_long tulip_cycle_t;
 static __inline tulip_cycle_t
 TULIP_PERFREAD(void)
 {
-	return (get_cyclecount());
+	return (cpu_ticks());
 }
 
 #define	TULIP_PERFDIFF(s, f)	((f) - (s))
Index: sys/dev/random/randomdev_soft.c
===================================================================
--- sys/dev/random/randomdev_soft.c	(revision 219714)
+++ sys/dev/random/randomdev_soft.c	(working copy)
@@ -353,8 +353,8 @@ random_yarrow_write(void *buf, int count)
 		chunk = HARVESTSIZE;
 		if (i + chunk >= count)
 			chunk = (u_int)(count - i);
-		random_harvest_internal(get_cyclecount(), (char *)buf + i,
-		    chunk, 0, 0, RANDOM_WRITE);
+		random_harvest_internal(cpu_ticks(), (char *)buf + i, chunk,
+		    0, 0, RANDOM_WRITE);
 	}
 }
 
Index: sys/dev/random/harvest.c
===================================================================
--- sys/dev/random/harvest.c	(revision 219714)
+++ sys/dev/random/harvest.c	(working copy)
@@ -78,17 +78,16 @@ random_yarrow_deinit_harvester(void)
  * Implemented as in indirect call to allow non-inclusion of
  * the entropy device.
  *
- * XXXRW: get_cyclecount() is cheap on most modern hardware, where cycle
- * counters are built in, but on older hardware it will do a real time clock
- * read which can be quite expensive.
+ * XXXRW: cpu_ticks() is cheap on most modern hardware, where cycle counters
+ * are built in, but on older hardware it will do a real time clock read
+ * which can be quite expensive.
  */
 void
 random_harvest(void *entropy, u_int count, u_int bits, u_int frac,
     enum esource origin)
 {
 	if (reap_func)
-		(*reap_func)(get_cyclecount(), entropy, count, bits, frac,
-		    origin);
+		(*reap_func)(cpu_ticks(), entropy, count, bits, frac, origin);
 }
 
 /* Userland-visible version of read_random */
Index: sys/i386/include/cpu.h
===================================================================
--- sys/i386/include/cpu.h	(revision 219714)
+++ sys/i386/include/cpu.h	(working copy)
@@ -39,7 +39,6 @@
 /*
  * Definitions unique to i386 cpu support.
  */
-#include <machine/cputypes.h>
 #include <machine/psl.h>
 #include <machine/frame.h>
 #include <machine/segments.h>
@@ -70,13 +69,8 @@ void swi_vm(void *);
 static __inline uint64_t
 get_cyclecount(void)
 {
-	struct bintime bt;
-
-	if (cpu_class == CPUCLASS_486) {
-		binuptime(&bt);
-		return ((uint64_t)bt.sec << 56 | bt.frac >> 8);
-	}
-	return (rdtsc());
+
+	return (cpu_ticks());
 }
 #endif
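As a side note, none of the patched call sites care about the absolute counter value: the random code just feeds it into entropy harvesting, and users such as KTR_TIME and TULIP_PERFDIFF() only look at differences between samples. A hedged sketch of that delta pattern, reusing the hypothetical example_cpu_ticks pointer from the sketch above (not a kernel API):

/*
 * Sketch only: time a section of work in raw ticks.  Only the difference
 * between two samples is meaningful; converting it to seconds would need
 * the ticker's frequency, which this sketch does not model.
 */
static uint64_t
example_measure(void (*work)(void))
{
	uint64_t start;

	start = example_cpu_ticks();
	work();
	return (example_cpu_ticks() - start);
}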