On 2/2/21 7:17 AM, Cheng Jiang wrote:
> Remove async inflight packet counter since there is no need to keep
> tracking it. Increase MAX_ENQUEUED_SIZE to prevent packet segment number
> tracking ring from being exhausted.

Is that an optimization or a fix?

If the former, let's move it to v21.05.

> 
> Fixes: a68ba8e0a6b6 ("examples/vhost: refactor vhost data path")
> 
> Signed-off-by: Cheng Jiang <cheng1.ji...@intel.com>
> ---
> v3: corrected the commit id in the Fixes tag of the git log
> 
> v2: fixed a typo
> 
>  examples/vhost/ioat.h |  2 +-
>  examples/vhost/main.c | 10 +---------
>  examples/vhost/main.h |  1 -
>  3 files changed, 2 insertions(+), 11 deletions(-)
> 
> diff --git a/examples/vhost/ioat.h b/examples/vhost/ioat.h
> index 0a1dbb811..1aa28ed6a 100644
> --- a/examples/vhost/ioat.h
> +++ b/examples/vhost/ioat.h
> @@ -11,7 +11,7 @@
> 
>  #define MAX_VHOST_DEVICE 1024
>  #define IOAT_RING_SIZE 4096
> -#define MAX_ENQUEUED_SIZE 512
> +#define MAX_ENQUEUED_SIZE 4096

Basically, this is the same as the ring size, correct?
It might be further simplified then.

> 
>  struct dma_info {
>       struct rte_pci_addr addr;
> diff --git a/examples/vhost/main.c b/examples/vhost/main.c
> index e74fc8750..ca73e7086 100644
> --- a/examples/vhost/main.c
> +++ b/examples/vhost/main.c
> @@ -831,11 +831,8 @@ complete_async_pkts(struct vhost_dev *vdev)
> 
>       complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
>                                       VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
> -     if (complete_count) {
> -             __atomic_sub_fetch(&vdev->nr_async_pkts, complete_count,
> -                     __ATOMIC_SEQ_CST);
> +     if (complete_count)
>               free_pkts(p_cpl, complete_count);
> -     }
>  }
> 
>  static __rte_always_inline void
> @@ -878,8 +875,6 @@ drain_vhost(struct vhost_dev *vdev)
>               complete_async_pkts(vdev);
>               ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
>                                       m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
> -             __atomic_add_fetch(&vdev->nr_async_pkts, ret - cpu_cpl_nr,
> -                             __ATOMIC_SEQ_CST);
> 
>               if (cpu_cpl_nr)
>                       free_pkts(m_cpu_cpl, cpu_cpl_nr);
> @@ -1210,9 +1205,6 @@ drain_eth_rx(struct vhost_dev *vdev)
>               enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
>                                       VIRTIO_RXQ, pkts, rx_count,
>                                       m_cpu_cpl, &cpu_cpl_nr);
> -             __atomic_add_fetch(&vdev->nr_async_pkts,
> -                                     enqueue_count - cpu_cpl_nr,
> -                                     __ATOMIC_SEQ_CST);
>               if (cpu_cpl_nr)
>                       free_pkts(m_cpu_cpl, cpu_cpl_nr);
> 
> diff --git a/examples/vhost/main.h b/examples/vhost/main.h
> index 2d6c05fd7..0ccdce4b4 100644
> --- a/examples/vhost/main.h
> +++ b/examples/vhost/main.h
> @@ -51,7 +51,6 @@ struct vhost_dev {
>       uint64_t features;
>       size_t hdr_len;
>       uint16_t nr_vrings;
> -     uint64_t nr_async_pkts;
>       struct rte_vhost_memory *mem;
>       struct device_statistics stats;
>       TAILQ_ENTRY(vhost_dev) global_vdev_entry;
> --
> 2.29.2
> 

Reply via email to