On Thu, Aug 31, 2023 at 04:01:35PM -0500, Scott Cheloha wrote:
> This is the next patch in the clock interrupt policy reorganization
> series.
> 
> While the hardclock/dt(4) patch is being rewritten we can do this
> orthogonal statclock() patch.  It needs to get done at some point
> anyway, may as well do it now.
> 
> So, this patch moves most of the statclock() code out of the clockintr
> layer and cedes control of the statclock() to the scheduler.  My
> thinking is: (a) statclock() increments p_cpticks and calls
> schedclock(), so in practice it is a scheduler interrupt, and (b) in
> the future it would be nice if the scheduler could disable the
> statclock when a CPU is very idle and maybe save some power.
> 
> All of this should feel familiar.  It is equivalent to what we just
> did to roundrobin().
> 
> - Move the contents of the clockintr_statclock() wrapper function
>   into statclock() and make statclock() a real clockintr callback.
> 
> - clockintr_expiration(), clockintr_nsecuptime(), and
>   clockintr_schedule() all become public sys/clockintr.h
>   interfaces for use in statclock().
> 
> - Tweak statclock() to handle multiple expirations at once.
> 
> - Move the statclock handle from clockintr_queue (cq_statclock) to
>   schedstate_percpu (spc_statclock).  Establish spc_statclock during
>   sched_init_cpu().
> 
> - Move the statclock variables from kern_clockintr.c to kern_clock.c.
>   Move statclock variable initialization from clockintr_init() forward
>   into initclocks().
> 
> - Replace the CL_RNDSTAT flag with a new global boolean,
>   statclock_is_randomized.  Update clockintr_init() callers to set
>   statclock_is_randomized instead of passing CL_RNDSTAT.
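Before the diff, a quick recap of the one subtle part, the randomized
rescheduling: statclock() now counts off every expiration since it last
ran and re-arms itself with a pseudo-random period drawn from
[statclock_min + 1, statclock_min + statclock_mask].  Here is that
arithmetic in isolation as a standalone userland sketch (the stathz,
uptime, and expiration values are stand-ins; this is not the kernel
code):

/*
 * Standalone sketch of the randomized statclock period logic.
 * Build with e.g. "cc -o sketch sketch.c" and run in userland.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	uint32_t stathz = 100;			/* stand-in frequency */
	uint32_t statclock_avg, statclock_min, statclock_mask;
	uint32_t half_avg, var, off;
	uint64_t count, expiration, uptime;

	/* As in initclocks(): largest power of two <= avg / 2. */
	statclock_avg = 1000000000 / stathz;	/* 10000000 ns */
	half_avg = statclock_avg / 2;
	for (var = 1U << 31; var > half_avg; var /= 2)
		continue;			/* var = 2^22 */
	statclock_min = statclock_avg - (var / 2);
	statclock_mask = var - 1;

	/*
	 * As in the new statclock(): pretend the dispatch is running
	 * 25ms late, so several expirations have elapsed and every
	 * one of them must be counted.
	 */
	expiration = 1000000000;		/* stand-in cl_expiration */
	uptime = expiration + 25000000;		/* stand-in nsecuptime */
	count = 0;
	while (expiration <= uptime) {
		/* Nonzero offset: period is min+1..min+mask ns. */
		while ((off = (random() & statclock_mask)) == 0)
			continue;
		expiration += statclock_min + off;
		count++;
	}
	printf("caught up %llu expirations, next at %llu ns\n",
	    (unsigned long long)count, (unsigned long long)expiration);
	return 0;
}

With stathz = 100 the period lands in roughly 7.9-12.1ms and averages
about 10ms; count is what lets the callback bill p_uticks and the
cp_time counters for every elapsed tick instead of just one.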
Ping.  ok?

Index: kern/kern_clock.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_clock.c,v
retrieving revision 1.115
diff -u -p -r1.115 kern_clock.c
--- kern/kern_clock.c	23 Aug 2023 01:55:45 -0000	1.115
+++ kern/kern_clock.c	5 Sep 2023 01:13:22 -0000
@@ -39,6 +39,7 @@
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/clockintr.h>
 #include <sys/timeout.h>
 #include <sys/kernel.h>
 #include <sys/limits.h>
@@ -87,17 +88,42 @@ int	ticks = INT_MAX - (15 * 60 * HZ);
 /* Don't force early wrap around, triggers bug in inteldrm */
 volatile unsigned long jiffies;
 
+uint32_t statclock_avg;		/* [I] average statclock period (ns) */
+uint32_t statclock_min;		/* [I] minimum statclock period (ns) */
+uint32_t statclock_mask;	/* [I] set of statclock_min offsets */
+int statclock_is_randomized;	/* [I] fixed or pseudo-random period */
+
 /*
  * Initialize clock frequencies and start both clocks running.
  */
 void
 initclocks(void)
 {
+	uint32_t half_avg, var;
+
 	/*
 	 * Let the machine-specific code do its bit.
	 */
	cpu_initclocks();
 
+	KASSERT(stathz >= 1 && stathz <= 1000000000);
+
+	/*
+	 * Compute the average statclock() period.  Then find var, the
+	 * largest power of two such that var <= statclock_avg / 2.
+	 */
+	statclock_avg = 1000000000 / stathz;
+	half_avg = statclock_avg / 2;
+	for (var = 1U << 31; var > half_avg; var /= 2)
+		continue;
+
+	/*
+	 * Set a lower bound for the range using statclock_avg and var.
+	 * The mask for that range is just (var - 1).
+	 */
+	statclock_min = statclock_avg - (var / 2);
+	statclock_mask = var - 1;
+
 	KASSERT(profhz >= stathz && profhz <= 1000000000);
 	KASSERT(profhz % stathz == 0);
 	profclock_period = 1000000000 / profhz;
@@ -246,12 +272,30 @@ stopprofclock(struct process *pr)
  * do process and kernel statistics.
  */
 void
-statclock(struct clockframe *frame)
+statclock(struct clockintr *cl, void *cf)
 {
+	uint64_t count, expiration, i, uptime;
+	struct clockframe *frame = cf;
 	struct cpu_info *ci = curcpu();
 	struct schedstate_percpu *spc = &ci->ci_schedstate;
 	struct proc *p = curproc;
 	struct process *pr;
+	uint32_t off;
+
+	if (statclock_is_randomized) {
+		count = 0;
+		expiration = clockintr_expiration(cl);
+		uptime = clockintr_nsecuptime(cl);
+		while (expiration <= uptime) {
+			while ((off = (random() & statclock_mask)) == 0)
+				continue;
+			expiration += statclock_min + off;
+			count++;
+		}
+		clockintr_schedule(cl, expiration);
+	} else {
+		count = clockintr_advance(cl, statclock_avg);
+	}
 
 	if (CLKF_USERMODE(frame)) {
 		pr = p->p_p;
@@ -259,11 +303,11 @@ statclock(struct clockframe *frame)
 		 * Came from user mode; CPU was in user state.
 		 * If this process is being profiled record the tick.
 		 */
-		p->p_uticks++;
+		p->p_uticks += count;
 		if (pr->ps_nice > NZERO)
-			spc->spc_cp_time[CP_NICE]++;
+			spc->spc_cp_time[CP_NICE] += count;
 		else
-			spc->spc_cp_time[CP_USER]++;
+			spc->spc_cp_time[CP_USER] += count;
 	} else {
 		/*
 		 * Came from kernel mode, so we were:
@@ -280,27 +324,30 @@ statclock(struct clockframe *frame)
 		 */
 		if (CLKF_INTR(frame)) {
 			if (p != NULL)
-				p->p_iticks++;
+				p->p_iticks += count;
 			spc->spc_cp_time[spc->spc_spinning ?
-			    CP_SPIN : CP_INTR]++;
+			    CP_SPIN : CP_INTR] += count;
 		} else if (p != NULL && p != spc->spc_idleproc) {
-			p->p_sticks++;
+			p->p_sticks += count;
 			spc->spc_cp_time[spc->spc_spinning ?
-			    CP_SPIN : CP_SYS]++;
-		} else
+			    CP_SPIN : CP_SYS] += count;
+		} else {
 			spc->spc_cp_time[spc->spc_spinning ?
-			    CP_SPIN : CP_IDLE]++;
+			    CP_SPIN : CP_IDLE] += count;
+		}
 	}
 
 	if (p != NULL) {
-		p->p_cpticks++;
 		/*
 		 * If no schedclock is provided, call it here at ~~12-25 Hz;
 		 * ~~16 Hz is best
 		 */
+		p->p_cpticks += count;
 		if (schedhz == 0) {
-			if ((++spc->spc_schedticks & 3) == 0)
-				schedclock(p);
+			for (i = 0; i < count; i++) {
+				if ((++spc->spc_schedticks & 3) == 0)
+					schedclock(p);
+			}
 		}
 	}
 }
Index: kern/kern_clockintr.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_clockintr.c,v
retrieving revision 1.33
diff -u -p -r1.33 kern_clockintr.c
--- kern/kern_clockintr.c	26 Aug 2023 22:21:00 -0000	1.33
+++ kern/kern_clockintr.c	5 Sep 2023 01:13:22 -0000
@@ -38,17 +38,10 @@
  */
 u_int clockintr_flags;		/* [I] global state + behavior flags */
 uint32_t hardclock_period;	/* [I] hardclock period (ns) */
-uint32_t statclock_avg;		/* [I] average statclock period (ns) */
-uint32_t statclock_min;		/* [I] minimum statclock period (ns) */
-uint32_t statclock_mask;	/* [I] set of allowed offsets */
 
 void clockintr_cancel_locked(struct clockintr *);
-uint64_t clockintr_expiration(const struct clockintr *);
 void clockintr_hardclock(struct clockintr *, void *);
-uint64_t clockintr_nsecuptime(const struct clockintr *);
-void clockintr_schedule(struct clockintr *, uint64_t);
 void clockintr_schedule_locked(struct clockintr *, uint64_t);
-void clockintr_statclock(struct clockintr *, void *);
 void clockqueue_intrclock_install(struct clockintr_queue *,
     const struct intrclock *);
 uint64_t clockqueue_next(const struct clockintr_queue *);
@@ -61,8 +54,6 @@ uint64_t nsec_advance(uint64_t *, uint64
 void
 clockintr_init(u_int flags)
 {
-	uint32_t half_avg, var;
-
 	KASSERT(CPU_IS_PRIMARY(curcpu()));
 	KASSERT(clockintr_flags == 0);
 	KASSERT(!ISSET(flags, ~CL_FLAG_MASK));
@@ -71,24 +62,6 @@ clockintr_init(u_int flags)
 	hardclock_period = 1000000000 / hz;
 	roundrobin_period = hardclock_period * 10;
 
-	KASSERT(stathz >= 1 && stathz <= 1000000000);
-
-	/*
-	 * Compute the average statclock() period.  Then find var, the
-	 * largest power of two such that var <= statclock_avg / 2.
-	 */
-	statclock_avg = 1000000000 / stathz;
-	half_avg = statclock_avg / 2;
-	for (var = 1U << 31; var > half_avg; var /= 2)
-		continue;
-
-	/*
-	 * Set a lower bound for the range using statclock_avg and var.
-	 * The mask for that range is just (var - 1).
-	 */
-	statclock_min = statclock_avg - (var / 2);
-	statclock_mask = var - 1;
-
 	SET(clockintr_flags, flags | CL_INIT);
 }
@@ -111,17 +84,12 @@ clockintr_cpu_init(const struct intrcloc
 	if (ic != NULL)
 		clockqueue_intrclock_install(cq, ic);
 
-	/* TODO: Remove these from struct clockintr_queue. */
+	/* TODO: Remove this from struct clockintr_queue. */
 	if (cq->cq_hardclock == NULL) {
 		cq->cq_hardclock = clockintr_establish(cq, clockintr_hardclock);
 		if (cq->cq_hardclock == NULL)
 			panic("%s: failed to establish hardclock", __func__);
 	}
-	if (cq->cq_statclock == NULL) {
-		cq->cq_statclock = clockintr_establish(cq, clockintr_statclock);
-		if (cq->cq_statclock == NULL)
-			panic("%s: failed to establish statclock", __func__);
-	}
 
 	/*
 	 * Mask CQ_INTRCLOCK while we're advancing the internal clock
@@ -166,13 +134,13 @@ clockintr_cpu_init(const struct intrcloc
 	 * We can always advance the statclock.  There is no reason to
 	 * stagger a randomized statclock.
 	 */
-	if (!ISSET(clockintr_flags, CL_RNDSTAT)) {
-		if (cq->cq_statclock->cl_expiration == 0) {
-			clockintr_stagger(cq->cq_statclock, statclock_avg,
+	if (!statclock_is_randomized) {
+		if (spc->spc_statclock->cl_expiration == 0) {
+			clockintr_stagger(spc->spc_statclock, statclock_avg,
 			    multiplier, MAXCPUS);
 		}
 	}
-	clockintr_advance(cq->cq_statclock, statclock_avg);
+	clockintr_advance(spc->spc_statclock, statclock_avg);
 
 	/*
 	 * XXX Need to find a better place to do this.  We can't do it in
@@ -493,30 +461,6 @@ clockintr_hardclock(struct clockintr *cl
 	count = clockintr_advance(cl, hardclock_period);
 	for (i = 0; i < count; i++)
 		hardclock(frame);
-}
-
-void
-clockintr_statclock(struct clockintr *cl, void *frame)
-{
-	uint64_t count, expiration, i, uptime;
-	uint32_t off;
-
-	if (ISSET(clockintr_flags, CL_RNDSTAT)) {
-		count = 0;
-		expiration = clockintr_expiration(cl);
-		uptime = clockintr_nsecuptime(cl);
-		while (expiration <= uptime) {
-			while ((off = (random() & statclock_mask)) == 0)
-				continue;
-			expiration += statclock_min + off;
-			count++;
-		}
-		clockintr_schedule(cl, expiration);
-	} else {
-		count = clockintr_advance(cl, statclock_avg);
-	}
-	for (i = 0; i < count; i++)
-		statclock(frame);
 }
 
 void
Index: kern/kern_sched.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_sched.c,v
retrieving revision 1.88
diff -u -p -r1.88 kern_sched.c
--- kern/kern_sched.c	31 Aug 2023 19:29:51 -0000	1.88
+++ kern/kern_sched.c	5 Sep 2023 01:13:24 -0000
@@ -97,6 +97,9 @@ sched_init_cpu(struct cpu_info *ci)
 	spc->spc_roundrobin = clockintr_establish(&ci->ci_queue, roundrobin);
 	if (spc->spc_roundrobin == NULL)
 		panic("%s: clockintr_establish roundrobin", __func__);
+	spc->spc_statclock = clockintr_establish(&ci->ci_queue, statclock);
+	if (spc->spc_statclock == NULL)
+		panic("%s: clockintr_establish statclock", __func__);
 
 	kthread_create_deferred(sched_kthreads_create, ci);
 
Index: sys/clockintr.h
===================================================================
RCS file: /cvs/src/sys/sys/clockintr.h,v
retrieving revision 1.10
diff -u -p -r1.10 clockintr.h
--- sys/clockintr.h	21 Aug 2023 17:22:04 -0000	1.10
+++ sys/clockintr.h	5 Sep 2023 01:13:24 -0000
@@ -97,7 +97,6 @@ struct clockintr_queue {
 	TAILQ_HEAD(, clockintr) cq_pend;/* [m] pending clockintr list */
 	struct clockintr *cq_running;	/* [m] running clockintr */
 	struct clockintr *cq_hardclock;	/* [o] hardclock handle */
-	struct clockintr *cq_statclock;	/* [o] statclock handle */
 	struct intrclock cq_intrclock;	/* [I] local interrupt clock */
 	struct clockintr_stat cq_stat;	/* [o] dispatch statistics */
 	volatile u_int cq_gen;		/* [o] cq_stat update generation */
@@ -114,8 +113,7 @@ struct clockintr_queue {
 #define CL_STATE_MASK		0x00000001
 
 /* Global behavior flags. */
-#define CL_RNDSTAT		0x80000000	/* randomized statclock */
-#define CL_FLAG_MASK		0x80000000
+#define CL_FLAG_MASK		0x00000000
 
 void clockintr_cpu_init(const struct intrclock *);
 int clockintr_dispatch(void *);
@@ -130,6 +128,9 @@ uint64_t clockintr_advance(struct clocki
 void clockintr_cancel(struct clockintr *);
 struct clockintr *clockintr_establish(struct clockintr_queue *,
     void (*)(struct clockintr *, void *));
+uint64_t clockintr_expiration(const struct clockintr *);
+uint64_t clockintr_nsecuptime(const struct clockintr *);
+void clockintr_schedule(struct clockintr *, uint64_t);
 void clockintr_stagger(struct clockintr *, uint64_t, u_int, u_int);
 void clockqueue_init(struct clockintr_queue *);
 int sysctl_clockintr(int *, u_int, void *, size_t *, void *, size_t);
Index: sys/systm.h
===================================================================
RCS file: /cvs/src/sys/sys/systm.h,v
retrieving revision 1.165
diff -u -p -r1.165 systm.h
--- sys/systm.h	23 Aug 2023 01:55:45 -0000	1.165
+++ sys/systm.h	5 Sep 2023 01:13:24 -0000
@@ -234,10 +234,14 @@ int	tstohz(const struct timespec *);
 void	realitexpire(void *);
 
 extern uint32_t hardclock_period;
+extern uint32_t statclock_avg;
+extern int statclock_is_randomized;
 
 struct clockframe;
 void	hardclock(struct clockframe *);
-void	statclock(struct clockframe *);
+
+struct clockintr;
+void	statclock(struct clockintr *, void *);
 
 void	initclocks(void);
 void	inittodr(time_t);
Index: sys/sched.h
===================================================================
RCS file: /cvs/src/sys/sys/sched.h,v
retrieving revision 1.61
diff -u -p -r1.61 sched.h
--- sys/sched.h	11 Aug 2023 22:02:50 -0000	1.61
+++ sys/sched.h	5 Sep 2023 01:13:24 -0000
@@ -109,6 +109,7 @@ struct schedstate_percpu {
 	struct clockintr *spc_itimer;	/* [o] itimer_update handle */
 	struct clockintr *spc_profclock; /* [o] profclock handle */
 	struct clockintr *spc_roundrobin; /* [o] roundrobin handle */
+	struct clockintr *spc_statclock; /* [o] statclock handle */
 
 	u_int spc_nrun;			/* procs on the run queues */
Index: arch/amd64/amd64/lapic.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/lapic.c,v
retrieving revision 1.69
diff -u -p -r1.69 lapic.c
--- arch/amd64/amd64/lapic.c	23 Aug 2023 01:55:46 -0000	1.69
+++ arch/amd64/amd64/lapic.c	5 Sep 2023 01:13:24 -0000
@@ -498,7 +498,8 @@ lapic_initclocks(void)
 
 	stathz = hz;
 	profhz = stathz * 10;
-	clockintr_init(CL_RNDSTAT);
+	statclock_is_randomized = 1;
+	clockintr_init(0);
 }
Index: arch/arm64/dev/agtimer.c
===================================================================
RCS file: /cvs/src/sys/arch/arm64/dev/agtimer.c,v
retrieving revision 1.26
diff -u -p -r1.26 agtimer.c
--- arch/arm64/dev/agtimer.c	23 Aug 2023 01:55:46 -0000	1.26
+++ arch/arm64/dev/agtimer.c	5 Sep 2023 01:13:26 -0000
@@ -293,7 +293,8 @@ agtimer_cpu_initclocks(void)
 	stathz = hz;
 	profhz = stathz * 10;
-	clockintr_init(CL_RNDSTAT);
+	statclock_is_randomized = 1;
+	clockintr_init(0);
 
 	if (sc->sc_ticks_per_second != agtimer_frequency) {
 		agtimer_set_clockrate(agtimer_frequency);
Index: arch/arm/cortex/agtimer.c
===================================================================
RCS file: /cvs/src/sys/arch/arm/cortex/agtimer.c,v
retrieving revision 1.19
diff -u -p -r1.19 agtimer.c
--- arch/arm/cortex/agtimer.c	23 Aug 2023 01:55:46 -0000	1.19
+++ arch/arm/cortex/agtimer.c	5 Sep 2023 01:13:26 -0000
@@ -230,7 +230,8 @@ agtimer_cpu_initclocks(void)
 	stathz = hz;
 	profhz = stathz * 10;
-	clockintr_init(CL_RNDSTAT);
+	statclock_is_randomized = 1;
+	clockintr_init(0);
 
 	if (sc->sc_ticks_per_second != agtimer_frequency) {
 		agtimer_set_clockrate(agtimer_frequency);
Index: arch/arm/cortex/amptimer.c
===================================================================
RCS file: /cvs/src/sys/arch/arm/cortex/amptimer.c,v
retrieving revision 1.18
diff -u -p -r1.18 amptimer.c
--- arch/arm/cortex/amptimer.c	23 Aug 2023 01:55:46 -0000	1.18
+++ arch/arm/cortex/amptimer.c	5 Sep 2023 01:13:27 -0000
@@ -287,7 +287,8 @@ amptimer_cpu_initclocks(void)
 	stathz = hz;
 	profhz = hz * 10;
-	clockintr_init(CL_RNDSTAT);
+	statclock_is_randomized = 1;
+	clockintr_init(0);
 
 	if (sc->sc_ticks_per_second != amptimer_frequency) {
 		amptimer_set_clockrate(amptimer_frequency);
Index: arch/armv7/omap/dmtimer.c
===================================================================
RCS file: /cvs/src/sys/arch/armv7/omap/dmtimer.c,v
retrieving revision 1.20
diff -u -p -r1.20 dmtimer.c
--- arch/armv7/omap/dmtimer.c	23 Aug 2023 01:55:46 -0000	1.20
+++ arch/armv7/omap/dmtimer.c	5 Sep 2023 01:13:27 -0000
@@ -232,7 +232,8 @@ dmtimer_cpu_initclocks(void)
 	stathz = hz;
 	profhz = stathz * 10;
-	clockintr_init(CL_RNDSTAT);
+	statclock_is_randomized = 1;
+	clockintr_init(0);
 
 	sc->sc_ticks_per_second = TIMER_FREQUENCY; /* 32768 */
 	sc->sc_nsec_cycle_ratio =
Index: arch/armv7/omap/gptimer.c
===================================================================
RCS file: /cvs/src/sys/arch/armv7/omap/gptimer.c,v
retrieving revision 1.21
diff -u -p -r1.21 gptimer.c
--- arch/armv7/omap/gptimer.c	23 Aug 2023 01:55:46 -0000	1.21
+++ arch/armv7/omap/gptimer.c	5 Sep 2023 01:13:27 -0000
@@ -198,7 +198,8 @@ gptimer_cpu_initclocks(void)
 {
 	stathz = hz;
 	profhz = stathz * 10;
-	clockintr_init(CL_RNDSTAT);
+	statclock_is_randomized = 1;
+	clockintr_init(0);
 
 	gptimer_nsec_cycle_ratio = TIMER_FREQUENCY * (1ULL << 32) / 1000000000;
 	gptimer_nsec_max = UINT64_MAX / gptimer_nsec_cycle_ratio;
Index: arch/armv7/sunxi/sxitimer.c
===================================================================
RCS file: /cvs/src/sys/arch/armv7/sunxi/sxitimer.c,v
retrieving revision 1.22
diff -u -p -r1.22 sxitimer.c
--- arch/armv7/sunxi/sxitimer.c	23 Aug 2023 01:55:46 -0000	1.22
+++ arch/armv7/sunxi/sxitimer.c	5 Sep 2023 01:13:27 -0000
@@ -180,7 +180,8 @@ sxitimer_attach(struct device *parent, s
 	stathz = hz;
 	profhz = stathz * 10;
-	clockintr_init(CL_RNDSTAT);
+	statclock_is_randomized = 1;
+	clockintr_init(0);
 
 	/* stop timer, and set clk src */
 	bus_space_write_4(sxitimer_iot, sxitimer_ioh,
Index: arch/hppa/dev/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/hppa/dev/clock.c,v
retrieving revision 1.37
diff -u -p -r1.37 clock.c
--- arch/hppa/dev/clock.c	23 Aug 2023 01:55:46 -0000	1.37
+++ arch/hppa/dev/clock.c	5 Sep 2023 01:13:27 -0000
@@ -116,7 +116,8 @@ cpu_initclocks(void)
 	stathz = hz;
 	profhz = stathz * 10;
-	clockintr_init(CL_RNDSTAT);
+	statclock_is_randomized = 1;
+	clockintr_init(0);
 
 	itmr_nsec_cycle_ratio = itmr_freq * (1ULL << 32) / 1000000000;
 	itmr_nsec_max = UINT64_MAX / itmr_nsec_cycle_ratio;
Index: arch/i386/i386/lapic.c
===================================================================
RCS file: /cvs/src/sys/arch/i386/i386/lapic.c,v
retrieving revision 1.56
diff -u -p -r1.56 lapic.c
--- arch/i386/i386/lapic.c	23 Aug 2023 01:55:46 -0000	1.56
+++ arch/i386/i386/lapic.c	5 Sep 2023 01:13:28 -0000
@@ -326,7 +326,8 @@ lapic_initclocks(void)
 	stathz = hz;
 	profhz = stathz * 10;
-	clockintr_init(CL_RNDSTAT);
+	statclock_is_randomized = 1;
+	clockintr_init(0);
 }
 
 extern int gettick(void);	/* XXX put in header file */
Index: arch/loongson/dev/glxclk.c
===================================================================
RCS file: /cvs/src/sys/arch/loongson/dev/glxclk.c,v
retrieving revision 1.9
diff -u -p -r1.9 glxclk.c
--- arch/loongson/dev/glxclk.c	26 Aug 2023 09:37:43 -0000	1.9
+++ arch/loongson/dev/glxclk.c	5 Sep 2023 01:13:28 -0000
@@ -189,10 +189,11 @@ glxclk_initclock(void)
 	 */
 	stathz = hz = 128;
 	profhz = hz * 10;
+	statclock_is_randomized = 1;
 	tick = 1000000 / hz;
 	tick_nsec = 1000000000 / hz;
 
-	clockintr_init(CL_RNDSTAT);
+	clockintr_init(0);
 }
 
 void
Index: arch/macppc/macppc/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/macppc/macppc/clock.c,v
retrieving revision 1.56
diff -u -p -r1.56 clock.c
--- arch/macppc/macppc/clock.c	23 Aug 2023 01:55:47 -0000	1.56
+++ arch/macppc/macppc/clock.c	5 Sep 2023 01:13:28 -0000
@@ -195,7 +195,8 @@ cpu_initclocks(void)
 	stathz = hz;
 	profhz = stathz * 10;
-	clockintr_init(CL_RNDSTAT);
+	statclock_is_randomized = 1;
+	clockintr_init(0);
 
 	dec_nsec_cycle_ratio = ticks_per_sec * (1ULL << 32) / 1000000000;
 	dec_nsec_max = UINT64_MAX / dec_nsec_cycle_ratio;
Index: arch/mips64/mips64/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/mips64/mips64/clock.c,v
retrieving revision 1.51
diff -u -p -r1.51 clock.c
--- arch/mips64/mips64/clock.c	23 Aug 2023 01:55:47 -0000	1.51
+++ arch/mips64/mips64/clock.c	5 Sep 2023 01:13:28 -0000
@@ -241,7 +241,8 @@ cp0_initclock(void)
 	stathz = hz;
 	profhz = stathz * 10;
-	clockintr_init(CL_RNDSTAT);
+	statclock_is_randomized = 1;
+	clockintr_init(0);
 }
 
 /*
Index: arch/powerpc64/powerpc64/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/powerpc64/powerpc64/clock.c,v
retrieving revision 1.12
diff -u -p -r1.12 clock.c
--- arch/powerpc64/powerpc64/clock.c	23 Aug 2023 01:55:47 -0000	1.12
+++ arch/powerpc64/powerpc64/clock.c	5 Sep 2023 01:13:28 -0000
@@ -94,7 +94,8 @@ cpu_initclocks(void)
 	stathz = hz;
 	profhz = stathz * 10;
-	clockintr_init(CL_RNDSTAT);
+	statclock_is_randomized = 1;
+	clockintr_init(0);
 
 	evcount_attach(&clock_count, "clock", NULL);
 }
Index: arch/riscv64/riscv64/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/riscv64/riscv64/clock.c,v
retrieving revision 1.11
diff -u -p -r1.11 clock.c
--- arch/riscv64/riscv64/clock.c	23 Aug 2023 01:55:47 -0000	1.11
+++ arch/riscv64/riscv64/clock.c	5 Sep 2023 01:13:28 -0000
@@ -92,7 +92,8 @@ cpu_initclocks(void)
 	stathz = hz;
 	profhz = stathz * 10;
-	clockintr_init(CL_RNDSTAT);
+	statclock_is_randomized = 1;
+	clockintr_init(0);
 
 	riscv_intc_intr_establish(IRQ_TIMER_SUPERVISOR, 0, clock_intr,
 	    NULL, NULL);
Index: arch/sparc64/sparc64/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/sparc64/sparc64/clock.c,v
retrieving revision 1.80
diff -u -p -r1.80 clock.c
--- arch/sparc64/sparc64/clock.c	23 Aug 2023 01:55:47 -0000	1.80
+++ arch/sparc64/sparc64/clock.c	5 Sep 2023 01:13:28 -0000
@@ -501,7 +501,8 @@ cpu_initclocks(void)
 	stathz = hz;
 	profhz = stathz * 10;
-	clockintr_init(CL_RNDSTAT);
+	statclock_is_randomized = 1;
+	clockintr_init(0);
 
 	/* Make sure we have a sane cpu_clockrate -- we'll need it */
 	if (!cpu_clockrate)