On Tue, Sep 05, 2023 at 09:17:27AM -0500, Scott Cheloha wrote:
> mpi@ suggests folding the pseudorandom advance code from
> clockintr_statclock() into the clockintr API itself.  This replaces
> three API calls -- clockintr_expiration(), clockintr_nsecuptime(), and
> clockintr_schedule() -- with just one call to a new function,
> clockintr_advance_random().
>
> I'm fine with it.  A pseudorandom period is an odd thing and
> supporting it is difficult.  Having a single bespoke API to support it
> might be the lesser of two evils.
>
> With this in place, the statclock() patch on tech@ can be simplified.
>
> ok?
>

This seems like a good idea. ok mlarkin
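
For anyone skimming the diff below: the caller-side win is that the
old three-call pattern collapses into a single call.  A condensed
sketch of the change (taken from the statclock hunk in the diff;
"my_min" and "my_mask" stand in for a consumer's own period
parameters):

	/* Before: three API calls to step past cq_uptime. */
	expiration = clockintr_expiration(cl);
	uptime = clockintr_nsecuptime(cl);
	while (expiration <= uptime) {
		while ((off = (random() & my_mask)) == 0)
			continue;
		expiration += my_min + off;
		count++;
	}
	clockintr_schedule(cl, expiration);

	/* After: one call does the same work on the shadow copy. */
	count = clockintr_advance_random(cl, my_min, my_mask);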

> Index: kern_clockintr.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/kern_clockintr.c,v
> retrieving revision 1.33
> diff -u -p -r1.33 kern_clockintr.c
> --- kern_clockintr.c  26 Aug 2023 22:21:00 -0000      1.33
> +++ kern_clockintr.c  5 Sep 2023 14:11:38 -0000
> @@ -42,8 +42,8 @@ uint32_t statclock_avg;                     /* [I] average
>  uint32_t statclock_min;		/* [I] minimum statclock period (ns) */
>  uint32_t statclock_mask;             /* [I] set of allowed offsets */
>
> +uint64_t clockintr_advance_random(struct clockintr *, uint64_t, uint32_t);
>  void clockintr_cancel_locked(struct clockintr *);
> -uint64_t clockintr_expiration(const struct clockintr *);
>  void clockintr_hardclock(struct clockintr *, void *);
>  uint64_t clockintr_nsecuptime(const struct clockintr *);
>  void clockintr_schedule(struct clockintr *, uint64_t);
> @@ -345,6 +345,30 @@ clockintr_advance(struct clockintr *cl,
>       return count;
>  }
>
> +/*
> + * Custom version of clockintr_advance() to support a pseudorandom
> + * statclock() period.  Hopefully we can throw this out at some point
> + * in the future.
> + */
> +uint64_t
> +clockintr_advance_random(struct clockintr *cl, uint64_t lo, uint32_t mask)
> +{
> +     uint64_t count = 0;
> +     struct clockintr_queue *cq = cl->cl_queue;
> +     uint32_t off;
> +
> +     KASSERT(cl == &cq->cq_shadow);
> +
> +     while (cl->cl_expiration <= cq->cq_uptime) {
> +             while ((off = (random() & mask)) == 0)
> +                     continue;
> +             cl->cl_expiration += lo + off;
> +             count++;
> +     }
> +     SET(cl->cl_flags, CLST_SHADOW_PENDING);
> +     return count;
> +}
> +
>  void
>  clockintr_cancel(struct clockintr *cl)
>  {
> @@ -402,21 +426,6 @@ clockintr_establish(struct clockintr_que
>       return cl;
>  }
>
> -uint64_t
> -clockintr_expiration(const struct clockintr *cl)
> -{
> -     uint64_t expiration;
> -     struct clockintr_queue *cq = cl->cl_queue;
> -
> -     if (cl == &cq->cq_shadow)
> -             return cl->cl_expiration;
> -
> -     mtx_enter(&cq->cq_mtx);
> -     expiration = cl->cl_expiration;
> -     mtx_leave(&cq->cq_mtx);
> -     return expiration;
> -}
> -
>  void
>  clockintr_schedule(struct clockintr *cl, uint64_t expiration)
>  {
> @@ -478,13 +487,6 @@ clockintr_stagger(struct clockintr *cl,
>       mtx_leave(&cq->cq_mtx);
>  }
>
> -uint64_t
> -clockintr_nsecuptime(const struct clockintr *cl)
> -{
> -     KASSERT(cl == &cl->cl_queue->cq_shadow);
> -     return cl->cl_queue->cq_uptime;
> -}
> -
>  void
>  clockintr_hardclock(struct clockintr *cl, void *frame)
>  {
> @@ -498,20 +500,11 @@ clockintr_hardclock(struct clockintr *cl
>  void
>  clockintr_statclock(struct clockintr *cl, void *frame)
>  {
> -     uint64_t count, expiration, i, uptime;
> -     uint32_t off;
> +     uint64_t count, i;
>
>       if (ISSET(clockintr_flags, CL_RNDSTAT)) {
> -             count = 0;
> -             expiration = clockintr_expiration(cl);
> -             uptime = clockintr_nsecuptime(cl);
> -             while (expiration <= uptime) {
> -                     while ((off = (random() & statclock_mask)) == 0)
> -                             continue;
> -                     expiration += statclock_min + off;
> -                     count++;
> -             }
> -             clockintr_schedule(cl, expiration);
> +             count = clockintr_advance_random(cl, statclock_min,
> +                 statclock_mask);
>       } else {
>               count = clockintr_advance(cl, statclock_avg);
>       }
>
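
A note on the arithmetic in clockintr_advance_random(): the rejection
loop draws "off" uniformly from [1, mask], so each period falls in
[lo + 1, lo + mask] and the mean period is about lo + (mask + 1) / 2.
A standalone userland sketch that checks this (illustrative values
only, not kernel code; arc4random(3) stands in for the kernel's
random()):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	int
	main(void)
	{
		const uint64_t lo = 1 << 20;		/* made-up minimum (ns) */
		const uint32_t mask = (1 << 20) - 1;	/* made-up offset mask */
		uint64_t sum = 0;
		uint32_t off;
		int i, trials = 1000000;

		for (i = 0; i < trials; i++) {
			/* Same rejection loop as the patch: skip zero offsets. */
			while ((off = (arc4random() & mask)) == 0)
				continue;
			sum += lo + off;
		}
		/* Prints a mean close to lo + (mask + 1) / 2. */
		printf("mean %llu, expected ~%llu\n",
		    (unsigned long long)(sum / trials),
		    (unsigned long long)(lo + (mask + 1) / 2));
		return 0;
	}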
