On Thu, 23 Feb 2017 17:24:05 +0000, Bruce Richardson <bruce.richard...@intel.com> wrote:
> We can write a single common function for head manipulation for enq
> and a common one for deq, allowing us to have a single worker function
> for enq and deq, rather than two of each. Update all other inline
> functions to use the new functions.
> 
> Signed-off-by: Bruce Richardson <bruce.richard...@intel.com>
> ---
>  lib/librte_ring/rte_ring.c |   4 +-
>  lib/librte_ring/rte_ring.h | 328 ++++++++++++++++++++-------------------------
>  2 files changed, 149 insertions(+), 183 deletions(-)
> 

[...]

> +static inline __attribute__((always_inline)) unsigned int
> +__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
> +              unsigned int n, enum rte_ring_queue_behavior behavior,
> +              int is_sp, unsigned int *free_space)
>  {
> -     uint32_t prod_head, cons_tail;
> -     uint32_t prod_next, free_entries;
> -     uint32_t mask = r->mask;
> -
> -     prod_head = r->prod.head;
> -     cons_tail = r->cons.tail;
> -     /* The subtraction is done between two unsigned 32bits value
> -      * (the result is always modulo 32 bits even if we have
> -      * prod_head > cons_tail). So 'free_entries' is always between 0
> -      * and size(ring)-1. */
> -     free_entries = mask + cons_tail - prod_head;
> -
> -     /* check that we have enough room in ring */
> -     if (unlikely(n > free_entries))
> -             n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : free_entries;
> +     uint32_t prod_head, prod_next;
> +     uint32_t free_entries;
>  
> +     n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
> +                     &prod_head, &prod_next, &free_entries);
>       if (n == 0)
>               goto end;
>  
> -
> -     prod_next = prod_head + n;
> -     r->prod.head = prod_next;
> -
> -     /* write entries in ring */
>       ENQUEUE_PTRS();
>       rte_smp_wmb();
>  
> +     /*
> +      * If there are other enqueues in progress that preceded us,
> +      * we need to wait for them to complete
> +      */
> +     while (unlikely(r->prod.tail != prod_head))
> +             rte_pause();
> +

I'd say this part should not be done when is_sp == 1: a single producer
has no preceding enqueues to wait for. Since is_sp is usually a
compile-time constant arg in the inline callers, it may be better to
add an explicit if (is_sp == 0) around the wait loop so the compiler
can drop it entirely in the single-producer variants.
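
Something like this (just a sketch, not compile-tested; I'm assuming
the r->prod.tail update follows the loop here, as it does on the
dequeue side below):

	if (is_sp == 0) {
		/*
		 * If there are other enqueues in progress that
		 * preceded us, we need to wait for them to complete.
		 */
		while (unlikely(r->prod.tail != prod_head))
			rte_pause();
	}
	r->prod.tail = prod_next;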

[...]

> +static inline __attribute__((always_inline)) unsigned int
> +__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
>                unsigned int n, enum rte_ring_queue_behavior behavior,
> -              unsigned int *available)
> +              int is_mp, unsigned int *available)
>  {
> -     uint32_t cons_head, prod_tail;
> -     uint32_t cons_next, entries;
> -     uint32_t mask = r->mask;
> -
> -     cons_head = r->cons.head;
> -     prod_tail = r->prod.tail;
> -     /* The subtraction is done between two unsigned 32bits value
> -      * (the result is always modulo 32 bits even if we have
> -      * cons_head > prod_tail). So 'entries' is always between 0
> -      * and size(ring)-1. */
> -     entries = prod_tail - cons_head;
> -
> -     if (n > entries)
> -             n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : entries;
> -
> -     if (unlikely(entries == 0))
> -             goto end;
> +     uint32_t cons_head, cons_next;
> +     uint32_t entries;
>  
> -     cons_next = cons_head + n;
> -     r->cons.head = cons_next;
> +     n = __rte_ring_move_cons_head(r, is_mp, n, behavior,
> +                     &cons_head, &cons_next, &entries);
> +     if (n == 0)
> +             goto end;
>  
> -     /* copy in table */
>       DEQUEUE_PTRS();
>       rte_smp_rmb();
>  
> +     /*
> +      * If there are other dequeues in progress that preceded us,
> +      * we need to wait for them to complete
> +      */
> +     while (unlikely(r->cons.tail != cons_head))
> +             rte_pause();
> +
>       r->cons.tail = cons_next;

Same here: the wait is only needed in the multi-consumer case, so it
could be guarded with the is_mp arg and skipped when is_mp == 0.
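
Again just a sketch, untested, mirroring the enqueue side:

	if (is_mp != 0) {
		/*
		 * If there are other dequeues in progress that
		 * preceded us, we need to wait for them to complete.
		 */
		while (unlikely(r->cons.tail != cons_head))
			rte_pause();
	}
	r->cons.tail = cons_next;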
