On Fri, Dec 09, 2016 at 05:58:34PM +0000, Gleb Smirnoff wrote:
> Author: glebius
> Date: Fri Dec  9 17:58:34 2016
> New Revision: 309745
> URL: https://svnweb.freebsd.org/changeset/base/309745
> 
> Log:
>   Provide counter_ratecheck(), a MP-friendly substitution to ppsratecheck().
>   When rated event happens at a very quick rate, the ppsratecheck() is not
>   only racy, but also becomes a performance bottleneck.
>   
>   Together with:      rrs, jtl
> 
> Modified:
>   head/share/man/man9/counter.9
>   head/sys/kern/subr_counter.c
>   head/sys/sys/counter.h
> 
> Modified: head/share/man/man9/counter.9
> ==============================================================================
> --- head/share/man/man9/counter.9     Fri Dec  9 17:58:07 2016        
> (r309744)
> +++ head/share/man/man9/counter.9     Fri Dec  9 17:58:34 2016        
> (r309745)
> @@ -25,7 +25,7 @@
>  .\"
>  .\" $FreeBSD$
>  .\"
> -.Dd March 14, 2016
> +.Dd December 8, 2016
>  .Dt COUNTER 9
>  .Os
>  .Sh NAME
> @@ -51,6 +51,8 @@
>  .Fn counter_u64_fetch "counter_u64_t c"
>  .Ft void
>  .Fn counter_u64_zero "counter_u64_t c"
> +.Ft int64_t
> +.Fn counter_ratecheck "struct counter_rate *cr" "int64_t limit"
>  .In sys/sysctl.h
>  .Fn SYSCTL_COUNTER_U64 parent nbr name access ptr descr
>  .Fn SYSCTL_ADD_COUNTER_U64 ctx parent nbr name access ptr descr
> @@ -128,6 +130,18 @@ value for any moment.
>  Clear the counter
>  .Fa c
>  and set it to zero.
> +.It Fn counter_ratecheck cr limit
> +The function is a multiprocessor-friendly version of
> +.Fn ppsratecheck ,
> +which uses
> +.Nm
> +internally.
> +Returns non-negative value if the rate isn't yet reached during the current
> +second, and a negative value otherwise.
> +If the limit was reached on previous second, but was just reset back to zero,
> +then
> +.Fn counter_ratecheck
> +returns number of events since previous reset.
>  .It Fn SYSCTL_COUNTER_U64 parent nbr name access ptr descr
>  Declare a static
>  .Xr sysctl
> 
> Modified: head/sys/kern/subr_counter.c
> ==============================================================================
> --- head/sys/kern/subr_counter.c      Fri Dec  9 17:58:07 2016        
> (r309744)
> +++ head/sys/kern/subr_counter.c      Fri Dec  9 17:58:34 2016        
> (r309745)
> @@ -119,3 +119,57 @@ sysctl_handle_counter_u64_array(SYSCTL_H
>   
>       return (0);
>  }
> +
> +/*
> + * MP-friendly version of ppsratecheck().
> + *
> + * Returns non-negative if we are in the rate, negative otherwise.
> + *  0 - rate limit not reached.
> + * -1 - rate limit reached.
> + * >0 - rate limit was reached before, and was just reset. The return value
> + *      is number of events since last reset.
> + */
> +int64_t
> +counter_ratecheck(struct counter_rate *cr, int64_t limit)
> +{
> +     int64_t val;
> +     int now;
> +
> +     val = cr->cr_over;
> +     now = ticks;
> +
> +     if (now - cr->cr_ticks >= hz) {
> +             /*
> +              * Time to clear the structure, we are in the next second.
> +              * First try unlocked read, and then proceed with atomic.
> +              */
> +             if ((cr->cr_lock == 0) &&
> +                 atomic_cmpset_int(&cr->cr_lock, 0, 1)) {
This should be cmpset_acq to prevent operations from inside the locked
region from being reordered outside of it, and to give the _rel store
below something to synchronize with.  Alternatively, call
atomic_thread_fence_acq() after the cmpset(); then the barrier is not
executed needlessly when the cmpset fails.

> +                     /*
> +                      * Check if other thread has just went through the
> +                      * reset sequence before us.
> +                      */
> +                     if (now - cr->cr_ticks >= hz) {
> +                             val = counter_u64_fetch(cr->cr_rate);
> +                             counter_u64_zero(cr->cr_rate);
> +                             cr->cr_over = 0;
> +                             cr->cr_ticks = now;
> +                     }
> +                     atomic_store_rel_int(&cr->cr_lock, 0);
> +             } else
> +                     /*
> +                      * We failed to lock, in this case other thread may
> +                      * be running counter_u64_zero(), so it is not safe
> +                      * to do an update, we skip it.
> +                      */
> +                     return (val);
> +     }
> +
> +     counter_u64_add(cr->cr_rate, 1);
What prevents this thread from failing the check for
        now - cr->cr_ticks >= hz
above, then getting off the CPU long enough for the next second to tick,
so that another thread starts the cleanup while this thread performs the
increment?  The current thread's update is then lost.

> +     if (cr->cr_over != 0)
> +             return (-1);
> +     if (counter_u64_fetch(cr->cr_rate) > limit)
> +             val = cr->cr_over = -1;
> +
> +     return (val);
> +}
> 
> Modified: head/sys/sys/counter.h
> ==============================================================================
> --- head/sys/sys/counter.h    Fri Dec  9 17:58:07 2016        (r309744)
> +++ head/sys/sys/counter.h    Fri Dec  9 17:58:34 2016        (r309745)
> @@ -59,5 +59,18 @@ uint64_t   counter_u64_fetch(counter_u64_t
>       for (int i = 0; i < (n); i++)                           \
>               counter_u64_zero((a)[i]);                       \
>  } while (0)
> +
> +/*
> + * counter(9) based rate checking.
> + */
> +struct counter_rate {
> +     counter_u64_t   cr_rate;        /* Events since last second */
> +     volatile int    cr_lock;        /* Lock to clean the struct */
> +     int             cr_ticks;       /* Ticks on last clean */
> +     int             cr_over;        /* Over limit since cr_ticks? */
> +};
> +
> +int64_t      counter_ratecheck(struct counter_rate *, int64_t);
> +
>  #endif       /* _KERNEL */
>  #endif       /* ! __SYS_COUNTER_H__ */
_______________________________________________
svn-src-head@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-head
To unsubscribe, send any mail to "svn-src-head-unsubscr...@freebsd.org"

Reply via email to