On 9/19/2018 2:42 PM, dev-boun...@dpdk.org wrote:
> With the existing code in kni_fifo_put, the rx_q values are not guaranteed
> to be updated before fifo_write is updated. When rx_q is then read in
> kni_net_rx_normal, this causes a synchronization issue on the other core.
> The same situation happens in kni_fifo_get as well.
> 
> So sync the values by adding C11 atomic memory barriers, to make sure the
> values are synced before fifo_write and fifo_read are updated.
> 
> Fixes: 3fc5ca2 ("kni: initial import")
> Signed-off-by: Phil Yang <phil.y...@arm.com>
> Reviewed-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>
> Reviewed-by: Gavin Hu <gavin...@arm.com>
> ---
>  .../linuxapp/eal/include/exec-env/rte_kni_common.h |  5 ++++
>  lib/librte_kni/rte_kni_fifo.h                      | 30 
> +++++++++++++++++++++-
>  2 files changed, 34 insertions(+), 1 deletion(-)
> 
> diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h 
> b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
> index cfa9448..1fd713b 100644
> --- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
> +++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
> @@ -54,8 +54,13 @@ struct rte_kni_request {
>   * Writing should never overwrite the read position
>   */
>  struct rte_kni_fifo {
> +#ifndef RTE_USE_C11_MEM_MODEL
>       volatile unsigned write;     /**< Next position to be written*/
>       volatile unsigned read;      /**< Next position to be read */
> +#else
> +     unsigned write;              /**< Next position to be written*/
> +     unsigned read;               /**< Next position to be read */
> +#endif
>       unsigned len;                /**< Circular buffer length */
>       unsigned elem_size;          /**< Pointer size - for 32/64 bit OS */
>       void *volatile buffer[];     /**< The buffer contains mbuf pointers */
> diff --git a/lib/librte_kni/rte_kni_fifo.h b/lib/librte_kni/rte_kni_fifo.h
> index ac26a8c..f4171a1 100644
> --- a/lib/librte_kni/rte_kni_fifo.h
> +++ b/lib/librte_kni/rte_kni_fifo.h
> @@ -28,8 +28,13 @@ kni_fifo_put(struct rte_kni_fifo *fifo, void **data, 
> unsigned num)
>  {
>       unsigned i = 0;
>       unsigned fifo_write = fifo->write;
> -     unsigned fifo_read = fifo->read;
>       unsigned new_write = fifo_write;
> +#ifdef RTE_USE_C11_MEM_MODEL
> +     unsigned fifo_read = __atomic_load_n(&fifo->read,
> +                                              __ATOMIC_ACQUIRE);
> +#else
> +     unsigned fifo_read = fifo->read;
> +#endif

Why is an atomic load preferred over "volatile"? Won't both end up accessing
memory — is an atomic load faster?

>  
>       for (i = 0; i < num; i++) {
>               new_write = (new_write + 1) & (fifo->len - 1);
> @@ -39,7 +44,12 @@ kni_fifo_put(struct rte_kni_fifo *fifo, void **data, 
> unsigned num)
>               fifo->buffer[fifo_write] = data[i];
>               fifo_write = new_write;
>       }
> +#ifdef RTE_USE_C11_MEM_MODEL
> +     __atomic_store_n(&fifo->write, fifo_write, __ATOMIC_RELEASE);
> +#else
> +     rte_smp_wmb();
>       fifo->write = fifo_write;
> +#endif

How does the atomic store guarantee that "fifo->buffer[fifo_write] = data[i];"
completes before "fifo->write = fifo_write;"? Does an atomic store also behave
as a write memory barrier?

Reply via email to